diff --git a/go.mod b/go.mod index 8ed99cdf82c68..aa385b954584c 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ go 1.18 require ( cloud.google.com/go/bigtable v1.3.0 - cloud.google.com/go/pubsub v1.3.1 - cloud.google.com/go/storage v1.10.0 + cloud.google.com/go/pubsub v1.27.1 + cloud.google.com/go/storage v1.29.0 github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-storage-blob-go v0.13.0 github.com/Azure/go-autorest/autorest/adal v0.9.20 @@ -44,9 +44,9 @@ require ( github.com/gogo/status v1.1.0 github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 - github.com/google/go-cmp v0.5.8 + github.com/google/go-cmp v0.5.9 github.com/google/renameio/v2 v2.0.0 - github.com/google/uuid v1.2.0 + github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.4.2 github.com/grafana/dskit v0.0.0-20220809080451-26c1b619d059 @@ -88,7 +88,7 @@ require ( github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 github.com/sony/gobreaker v0.4.1 github.com/spf13/afero v1.6.0 - github.com/stretchr/testify v1.7.2 + github.com/stretchr/testify v1.8.1 github.com/thanos-io/thanos v0.22.0 github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 github.com/uber/jaeger-client-go v2.30.0+incompatible @@ -98,12 +98,12 @@ require ( go.uber.org/atomic v1.9.0 go.uber.org/goleak v1.1.12 golang.org/x/crypto v0.0.0-20220214200702-86341886e292 - golang.org/x/net v0.0.0-20220706163947-c90051bbdb60 - golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f - golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b - golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 - google.golang.org/api v0.83.0 - google.golang.org/grpc v1.47.0 + golang.org/x/net v0.0.0-20221014081412-f15817d10f9b + golang.org/x/sync v0.1.0 + golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 + golang.org/x/time v0.1.0 + google.golang.org/api v0.106.0 + google.golang.org/grpc v1.51.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/yaml.v2 v2.4.0 @@ -119,14 +119,15 @@ require ( github.com/prometheus/alertmanager v0.24.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/willf/bloom v2.0.3+incompatible - golang.org/x/text v0.3.7 + golang.org/x/text v0.5.0 ) require ( - cloud.google.com/go v0.100.2 // indirect - cloud.google.com/go/compute v1.6.1 // indirect - cloud.google.com/go/iam v0.1.0 // indirect - cloud.google.com/go/kms v1.0.0 // indirect + cloud.google.com/go v0.107.0 // indirect + cloud.google.com/go/compute v1.14.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/iam v0.8.0 // indirect + cloud.google.com/go/longrunning v0.3.0 // indirect github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect @@ -187,7 +188,8 @@ require ( github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20220520215854-d04f2422c8a1 // indirect - github.com/googleapis/gax-go/v2 v2.4.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect + github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/gophercloud/gophercloud v0.25.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 // indirect @@ -242,7 +244,7 @@ require ( github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 // indirect 
github.com/spf13/cast v1.3.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/ugorji/go/codec v1.1.7 // indirect github.com/weaveworks/promrus v1.2.0 // indirect @@ -254,7 +256,7 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect go.etcd.io/etcd/client/v3 v3.5.0 // indirect go.mongodb.org/mongo-driver v1.8.3 // indirect - go.opencensus.io v0.23.0 // indirect + go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0 // indirect go.opentelemetry.io/otel v1.7.0 // indirect go.opentelemetry.io/otel/metric v0.30.0 // indirect @@ -263,14 +265,14 @@ require ( go.uber.org/zap v1.19.1 // indirect go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect - golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect + golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/tools v0.1.10 // indirect - golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect + golang.org/x/tools v0.1.12 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 // indirect - google.golang.org/protobuf v1.28.0 // indirect + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.57.0 // indirect diff --git a/go.sum b/go.sum index 0db6a8572068c..de366fae02f57 100644 --- a/go.sum +++ b/go.sum @@ -28,9 +28,9 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -43,25 +43,31 @@ cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTB cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wqc= 
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.1.0 h1:W2vbGCrE3Z7J/x3WXLxxGl9LMSB2uhsAA7Ss/6u/qRY= -cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= -cloud.google.com/go/kms v1.0.0 h1:YkIeqPXqTAlwXk3Z2/WG0d6h1tqJQjU354WftjEoP9E= -cloud.google.com/go/kms v1.0.0/go.mod h1:nhUehi+w7zht2XrUfvTRNpxrfayBHqP4lu2NSywui/0= +cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/kms v1.6.0 h1:OWRZzrPmOZUzurjI2FBGtgY2mB1WaJkqhw6oIwSj0Yg= +cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.27.1 h1:q+J/Nfr6Qx4RQeu3rJcnN48SNC0qzlYzSeqkPq93VHs= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= cloud.google.com/go/trace v0.1.0/go.mod h1:wxEwsoeRVPbeSkt7ZC9nWCgmoKQRAoySN7XHW2AmI7g= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= @@ -769,8 +775,9 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 
h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -812,8 +819,11 @@ github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxeh github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -821,8 +831,9 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= @@ -1517,8 +1528,10 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1526,8 +1539,10 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.194/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= @@ -1649,8 +1664,9 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0 h1:mac9BKRqwaX6zxHPDe3pvmWpwuuIM0vuXv2juCnQevE= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0/go.mod h1:5eCOqeGphOyz6TsY3ZDNjE33SM/TFAK3RGuCL2naTgY= go.opentelemetry.io/otel v1.0.0-RC1/go.mod h1:x9tRa9HK4hSSq7jf2TKbqFbtt58/TGk0f9XiEYISI1I= @@ -1797,8 +1813,9 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1877,8 +1894,8 @@ golang.org/x/net 
v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220706163947-c90051bbdb60 h1:8NSylCMxLW4JvserAndSgFL7aPli6A68yf0bYFTcWCM= -golang.org/x/net v0.0.0-20220706163947-c90051bbdb60/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b h1:tvrvnPFcdzp294diPnrdZZZ8XUt2Tyj7svb7X52iDuU= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1902,8 +1919,9 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 h1:zwrSfklXn0gxyLRX/aR+q6cgHbV/ItVyzbPlbA+dkAw= golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1916,8 +1934,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2040,8 +2059,8 @@ golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys 
v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b h1:2n253B2r0pYSmEV+UNCQoPfU/FiaizQEK5Gu4Bq4JE8= -golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2055,8 +2074,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2065,8 +2085,9 @@ golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs= golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2146,15 +2167,17 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.10 
h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= @@ -2204,8 +2227,9 @@ google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.83.0 h1:pMvST+6v+46Gabac4zlJlalxZjCeRcepwg2EdBU+nCc= google.golang.org/api v0.83.0/go.mod h1:CNywQoj/AfhTw26ZWAa6LwOv+6WFxHmeLPZq2uncLZk= +google.golang.org/api v0.106.0 h1:ffmW0faWCwKkpbbtvlY/K/8fUl+JKvNS5CVzRoyfCv8= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2281,7 +2305,6 @@ google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= @@ -2302,8 +2325,9 @@ google.golang.org/genproto 
v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 h1:qRu95HZ148xXw+XeZ3dvqe85PxH4X8+jIo0iRPKcEnM= google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8/go.mod h1:yKyY4AMRwFiC8yMMNaMi+RkCnjZJt9LoWuvhXjMs+To= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -2322,8 +2346,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json new file mode 100644 index 0000000000000..e5ebe1edf35e0 --- /dev/null +++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json @@ -0,0 +1,13 @@ +{ + "bigquery": "1.43.0", + "bigtable": "1.18.0", + "datastore": "1.9.0", + "errorreporting": "0.2.0", + "firestore": "1.8.0", + "logging": "1.5.0", + "profiler": "0.3.0", + "pubsub": "1.26.0", + "pubsublite": "1.4.1", + "spanner": "1.40.0", + "storage": "1.28.0" +} diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json new file mode 100644 index 0000000000000..db91415aa4a8c --- /dev/null +++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json @@ -0,0 +1,115 @@ +{ + "accessapproval": "1.5.0", + "accesscontextmanager": "1.5.0", + "aiplatform": "1.25.0", + "analytics": "0.12.0", + "apigateway": "1.4.0", + "apigeeconnect": "1.4.0", + "apigeeregistry": "0.2.1", + "apikeys": "0.2.0", + "appengine": "1.5.0", + "area120": "0.6.0", + "artifactregistry": "1.9.0", + "asset": "1.10.0", + "assuredworkloads": "1.9.0", + "automl": "1.8.0", + "baremetalsolution": "0.4.0", + "batch": "0.4.0", + "beyondcorp": "0.3.0", + "billing": "1.7.0", + "binaryauthorization": "1.4.0", + "certificatemanager": 
"1.4.0", + "channel": "1.9.0", + "cloudbuild": "1.5.0", + "clouddms": "1.4.0", + "cloudtasks": "1.8.0", + "compute": "1.12.1", + "compute/metadata": "0.2.1", + "contactcenterinsights": "1.4.0", + "container": "1.8.0", + "containeranalysis": "0.6.0", + "datacatalog": "1.8.1", + "dataflow": "0.7.0", + "dataform": "0.5.0", + "datafusion": "1.5.0", + "datalabeling": "0.6.0", + "dataplex": "1.4.0", + "dataproc": "1.8.0", + "dataqna": "0.6.0", + "datastream": "1.5.0", + "deploy": "1.5.0", + "dialogflow": "1.20.0", + "dlp": "1.7.0", + "documentai": "1.11.0", + "domains": "0.7.0", + "edgecontainer": "0.2.0", + "essentialcontacts": "1.4.0", + "eventarc": "1.9.0", + "filestore": "1.4.0", + "functions": "1.9.0", + "gaming": "1.8.0", + "gkebackup": "0.3.0", + "gkeconnect": "0.6.0", + "gkehub": "0.10.0", + "gkemulticloud": "0.4.0", + "grafeas": "0.2.0", + "gsuiteaddons": "1.4.0", + "iam": "0.7.0", + "iap": "1.5.0", + "ids": "1.2.0", + "iot": "1.4.0", + "kms": "1.6.0", + "language": "1.8.0", + "lifesciences": "0.6.0", + "longrunning": "0.3.0", + "managedidentities": "1.4.0", + "maps": "0.0.0", + "mediatranslation": "0.6.0", + "memcache": "1.7.0", + "metastore": "1.8.0", + "monitoring": "1.9.0", + "networkconnectivity": "1.8.0", + "networkmanagement": "1.5.0", + "networksecurity": "0.6.0", + "notebooks": "1.5.0", + "optimization": "1.2.0", + "orchestration": "1.4.0", + "orgpolicy": "1.5.0", + "osconfig": "1.10.0", + "oslogin": "1.7.0", + "phishingprotection": "0.6.0", + "policytroubleshooter": "1.4.0", + "privatecatalog": "0.6.0", + "recaptchaenterprise/v2": "2.5.0", + "recommendationengine": "0.6.0", + "recommender": "1.8.0", + "redis": "1.10.0", + "resourcemanager": "1.4.0", + "resourcesettings": "1.4.0", + "retail": "1.11.0", + "run": "0.4.0", + "scheduler": "1.7.0", + "secretmanager": "1.9.0", + "security": "1.10.0", + "securitycenter": "1.16.0", + "servicecontrol": "1.5.0", + "servicedirectory": "1.7.0", + "servicemanagement": "1.5.0", + "serviceusage": "1.4.0", + "shell": "1.4.0", + "speech": "1.9.0", + "storagetransfer": "1.6.0", + "talent": "1.4.1", + "texttospeech": "1.5.0", + "tpu": "1.4.0", + "trace": "1.4.0", + "translate": "1.4.0", + "video": "1.10.0", + "videointelligence": "1.9.0", + "vision/v2": "2.5.0", + "vmmigration": "1.3.0", + "vpcaccess": "1.5.0", + "webrisk": "1.7.0", + "websecurityscanner": "1.4.0", + "workflows": "1.9.0" +} diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json new file mode 100644 index 0000000000000..9eeeeb4545388 --- /dev/null +++ b/vendor/cloud.google.com/go/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "0.107.0" +} diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index bac014faa35af..bb55f2e272b08 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,71 @@ # Changes +## [0.107.0](https://github.com/googleapis/google-cloud-go/compare/v0.106.0...v0.107.0) (2022-11-15) + + +### Features + +* **routing:** Start generating apiv2 ([#7011](https://github.com/googleapis/google-cloud-go/issues/7011)) ([66e8e27](https://github.com/googleapis/google-cloud-go/commit/66e8e2717b2593f4e5640ecb97344bb1d5e5fc0b)) + +## [0.106.0](https://github.com/googleapis/google-cloud-go/compare/v0.105.0...v0.106.0) (2022-11-09) + + +### Features + +* **debugger:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) 
+ +## [0.104.0](https://github.com/googleapis/google-cloud-go/compare/v0.103.0...v0.104.0) (2022-08-24) + + +### Features + +* **godocfx:** add friendlyAPIName ([#6447](https://github.com/googleapis/google-cloud-go/issues/6447)) ([c6d3ba4](https://github.com/googleapis/google-cloud-go/commit/c6d3ba401b7b3ae9b710a8850c6ec5d49c4c1490)) + +## [0.103.0](https://github.com/googleapis/google-cloud-go/compare/v0.102.1...v0.103.0) (2022-06-29) + + +### Features + +* **privateca:** temporarily remove REGAPIC support ([199b725](https://github.com/googleapis/google-cloud-go/commit/199b7250f474b1a6f53dcf0aac0c2966f4987b68)) + +## [0.102.1](https://github.com/googleapis/google-cloud-go/compare/v0.102.0...v0.102.1) (2022-06-17) + + +### Bug Fixes + +* **longrunning:** regapic remove path params duped as query params ([#6183](https://github.com/googleapis/google-cloud-go/issues/6183)) ([c963be3](https://github.com/googleapis/google-cloud-go/commit/c963be301f074779e6bb8c897d8064fa076e9e35)) + +## [0.102.0](https://github.com/googleapis/google-cloud-go/compare/v0.101.1...v0.102.0) (2022-05-24) + + +### Features + +* **civil:** add Before and After methods to civil.Time ([#5703](https://github.com/googleapis/google-cloud-go/issues/5703)) ([7acaaaf](https://github.com/googleapis/google-cloud-go/commit/7acaaafef47668c3e8382b8bc03475598c3db187)) + +### [0.101.1](https://github.com/googleapis/google-cloud-go/compare/v0.101.0...v0.101.1) (2022-05-03) + + +### Bug Fixes + +* **internal/gapicgen:** properly update modules that have no gapic changes ([#5945](https://github.com/googleapis/google-cloud-go/issues/5945)) ([de2befc](https://github.com/googleapis/google-cloud-go/commit/de2befcaa2a886499db9da6d4d04d28398c8d44b)) + +## [0.101.0](https://github.com/googleapis/google-cloud-go/compare/v0.100.2...v0.101.0) (2022-04-20) + + +### Features + +* **all:** bump grpc dep ([#5481](https://github.com/googleapis/google-cloud-go/issues/5481)) ([b12964d](https://github.com/googleapis/google-cloud-go/commit/b12964df5c63c647aaf204e73cfcdfd379d19682)) +* **internal/gapicgen:** change versionClient for gapics ([#5687](https://github.com/googleapis/google-cloud-go/issues/5687)) ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) + + +### Bug Fixes + +* **internal/gapicgen:** add generation of internal/version.go for new client modules ([#5726](https://github.com/googleapis/google-cloud-go/issues/5726)) ([341e0df](https://github.com/googleapis/google-cloud-go/commit/341e0df1e44480706180cc5b07c49b3cee904095)) +* **internal/gapicgen:** don't gen version files for longrunning and debugger ([#5698](https://github.com/googleapis/google-cloud-go/issues/5698)) ([3a81108](https://github.com/googleapis/google-cloud-go/commit/3a81108c74cd8864c56b8ab5939afd864db3c64b)) +* **internal/gapicgen:** don't try to make snippets for non-gapics ([#5919](https://github.com/googleapis/google-cloud-go/issues/5919)) ([c94dddc](https://github.com/googleapis/google-cloud-go/commit/c94dddc60ef83a0584ba8f7dd24589d9db971672)) +* **internal/gapicgen:** move breaking change indicator if present ([#5452](https://github.com/googleapis/google-cloud-go/issues/5452)) ([e712df5](https://github.com/googleapis/google-cloud-go/commit/e712df5ebb45598a1653081d7e11e578bad22ff8)) +* **internal/godocfx:** prevent errors for filtered mods ([#5485](https://github.com/googleapis/google-cloud-go/issues/5485)) ([6cb9b89](https://github.com/googleapis/google-cloud-go/commit/6cb9b89b2d654c695eab00d8fb375cce0cd6e059)) + ## 
[0.100.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.99.0...v0.100.0) (2022-01-04) diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md index c3a3852c3829e..6d6e48b65bd72 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTING.md +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -2,7 +2,7 @@ 1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose). The issue will be used to discuss the bug or feature and should be created - before sending a CL. + before sending a PR. 1. [Install Go](https://golang.org/dl/). 1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`) diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index acde43bc8885b..01453cc692a53 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -32,7 +32,14 @@ For an updated list of all of our released APIs please see our ## [Go Versions Supported](#supported-versions) -We currently support Go versions 1.11 and newer. +Our libraries are compatible with at least the three most recent, major Go +releases. They are currently compatible with: + +- Go 1.19 +- Go 1.18 +- Go 1.17 +- Go 1.16 +- Go 1.15 ## Authorization diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go new file mode 100644 index 0000000000000..c9ba91825c29d --- /dev/null +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. +const Version = "1.14.0" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md new file mode 100644 index 0000000000000..06b957349afd5 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -0,0 +1,19 @@ +# Changes + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) + + +### Bug Fixes + +* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + +## [0.1.0] (2022-10-26) + +Initial release of metadata being it's own module. 
diff --git a/vendor/cloud.google.com/go/compute/metadata/LICENSE b/vendor/cloud.google.com/go/compute/metadata/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/metadata/README.md b/vendor/cloud.google.com/go/compute/metadata/README.md new file mode 100644 index 0000000000000..f940fb2c85b83 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/README.md @@ -0,0 +1,27 @@ +# Compute API + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) + +This is a utility library for communicating with Google Cloud metadata service +on Google Cloud. + +## Install + +```bash +go get cloud.google.com/go/compute/metadata +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 111309f3d8b4e..c17faa142a44f 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -16,7 +16,7 @@ // metadata and API service accounts. // // This package is a wrapper around the GCE metadata service, -// as documented at https://developers.google.com/compute/docs/metadata. +// as documented at https://cloud.google.com/compute/docs/metadata/overview. package metadata // import "cloud.google.com/go/compute/metadata" import ( @@ -70,7 +70,9 @@ func newDefaultHTTPClient() *http.Client { Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, + IdleConnTimeout: 60 * time.Second, }, + Timeout: 5 * time.Second, } } @@ -145,7 +147,7 @@ func testOnGCE() bool { go func() { resolver := &net.Resolver{} - addrs, err := resolver.LookupHost(ctx, "metadata.google.internal") + addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") if err != nil || len(addrs) == 0 { resc <- false return diff --git a/vendor/cloud.google.com/go/pubsub/go_mod_tidy_hack.go b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go similarity index 80% rename from vendor/cloud.google.com/go/pubsub/go_mod_tidy_hack.go rename to vendor/cloud.google.com/go/compute/metadata/tidyfix.go index 20b865930b9ce..4cef48500817c 100644 --- a/vendor/cloud.google.com/go/pubsub/go_mod_tidy_hack.go +++ b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the cloud.google.com/go import, won't actually become part of +// This file, and the {{.RootMod}} import, won't actually become part of // the resultant binary. +//go:build modhack // +build modhack -package pubsub +package metadata // Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "cloud.google.com/go" +import _ "cloud.google.com/go/compute/internal" diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go index 746696f371144..833878ec8f313 100644 --- a/vendor/cloud.google.com/go/doc.go +++ b/vendor/cloud.google.com/go/doc.go @@ -17,14 +17,30 @@ Package cloud is the root of the packages used to access Google Cloud Services. See https://godoc.org/cloud.google.com/go for a full list of sub-packages. - -Client Options +# Client Options All clients in sub-packages are configurable via client options. These options are described here: https://godoc.org/google.golang.org/api/option. +## Endpoint Override + +Endpoint configuration is used to specify the URL to which requests are +sent. It is used for services that support or require regional endpoints, as well +as for other use cases such as [testing against fake +servers](https://github.com/googleapis/google-cloud-go/blob/main/testing.md#testing-grpc-services-using-fakes). + +For example, the Vertex AI service recommends that you configure the endpoint to the +location with the features you want that is closest to your physical location or the +location of your users. There is no global endpoint for Vertex AI. See +[Vertex AI - Locations](https://cloud.google.com/vertex-ai/docs/general/locations) +for more details. The following example demonstrates configuring a Vertex AI client +with a regional endpoint: + + ctx := context.Background() + endpoint := "us-central1-aiplatform.googleapis.com:443" + client, err := aiplatform.NewDatasetClient(ctx, option.WithEndpoint(endpoint)) -Authentication and Authorization +# Authentication and Authorization All the clients in sub-packages support authentication via Google Application Default Credentials (see https://cloud.google.com/docs/authentication/production), or @@ -35,11 +51,12 @@ and authenticate clients. For information on how to create and obtain Application Default Credentials, see https://cloud.google.com/docs/authentication/production. Here is an example of a client using ADC to authenticate: - client, err := secretmanager.NewClient(context.Background()) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. + + client, err := secretmanager.NewClient(context.Background()) + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. You can use a file with credentials to authenticate and authorize, such as a JSON key file associated with a Google service account. Service Account keys can be @@ -47,12 +64,13 @@ created and downloaded from https://console.cloud.google.com/iam-admin/serviceaccounts. This example uses the Secret Manger client, but the same steps apply to the other client libraries underneath this package. Example: - client, err := secretmanager.NewClient(context.Background(), - option.WithCredentialsFile("/path/to/service-account-key.json")) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. 
+ + client, err := secretmanager.NewClient(context.Background(), + option.WithCredentialsFile("/path/to/service-account-key.json")) + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. In some cases (for instance, you don't want to store secrets on disk), you can create credentials from in-memory JSON and use the WithCredentials option. @@ -62,19 +80,19 @@ the other client libraries underneath this package. Note that scopes can be found at https://developers.google.com/identity/protocols/oauth2/scopes, and are also provided in all auto-generated libraries: for example, cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example: - ctx := context.Background() - creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...) - if err != nil { - // TODO: handle error. - } - client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds)) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. + ctx := context.Background() + creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...) + if err != nil { + // TODO: handle error. + } + client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds)) + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. -Timeouts and Cancellation +# Timeouts and Cancellation By default, non-streaming methods, like Create or Get, will have a default deadline applied to the context provided at call time, unless a context deadline is already set. Streaming @@ -83,40 +101,42 @@ arrange for cancellation, use contexts. Transient errors will be retried when correctness allows. Here is an example of how to set a timeout for an RPC, use context.WithTimeout: - ctx := context.Background() - // Do not set a timeout on the context passed to NewClient: dialing happens - // asynchronously, and the context is used to refresh credentials in the - // background. - client, err := secretmanager.NewClient(ctx) - if err != nil { - // TODO: handle error. - } - // Time out if it takes more than 10 seconds to create a dataset. - tctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() // Always call cancel. - - req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"} - if err := client.DeleteSecret(tctx, req); err != nil { - // TODO: handle error. - } + + ctx := context.Background() + // Do not set a timeout on the context passed to NewClient: dialing happens + // asynchronously, and the context is used to refresh credentials in the + // background. + client, err := secretmanager.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Time out if it takes more than 10 seconds to create a dataset. + tctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() // Always call cancel. + + req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"} + if err := client.DeleteSecret(tctx, req); err != nil { + // TODO: handle error. + } Here is an example of how to arrange for an RPC to be canceled, use context.WithCancel: - ctx := context.Background() - // Do not cancel the context passed to NewClient: dialing happens asynchronously, - // and the context is used to refresh credentials in the background. - client, err := secretmanager.NewClient(ctx) - if err != nil { - // TODO: handle error. - } - cctx, cancel := context.WithCancel(ctx) - defer cancel() // Always call cancel. 
- - // TODO: Make the cancel function available to whatever might want to cancel the - // call--perhaps a GUI button. - req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"} - if err := client.DeleteSecret(cctx, req); err != nil { - // TODO: handle error. - } + + ctx := context.Background() + // Do not cancel the context passed to NewClient: dialing happens asynchronously, + // and the context is used to refresh credentials in the background. + client, err := secretmanager.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + cctx, cancel := context.WithCancel(ctx) + defer cancel() // Always call cancel. + + // TODO: Make the cancel function available to whatever might want to cancel the + // call--perhaps a GUI button. + req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"} + if err := client.DeleteSecret(cctx, req); err != nil { + // TODO: handle error. + } To opt out of default deadlines, set the temporary environment variable GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client @@ -130,8 +150,7 @@ timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts would be ineffective and would only interfere with credential refreshing, which uses the same context. - -Connection Pooling +# Connection Pooling Connection pooling differs in clients based on their transport. Cloud clients either rely on HTTP or gRPC transports to communicate @@ -147,37 +166,65 @@ of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a clie option to NewClient calls. This configures the underlying gRPC connections to be pooled and addressed in a round robin fashion. - -Using the Libraries with Docker +# Using the Libraries with Docker Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928 for more information. - -Debugging +# Debugging To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See https://godoc.org/google.golang.org/grpc/grpclog for more information. For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2". +# Inspecting errors + +Most of the errors returned by the generated clients are wrapped in an +[github.com/googleapis/gax-go/v2/apierror.APIError] and can be further unwrapped +into a [google.golang.org/grpc/status.Status] or +[google.golang.org/api/googleapi.Error] depending +on the transport used to make the call (gRPC or REST). Converting your errors to +these types can be a useful way to get more information about what went wrong +while debugging. + +[github.com/googleapis/gax-go/v2/apierror.APIError] gives access to specific +details in the error. The transport-specific errors can still be unwrapped using +the [github.com/googleapis/gax-go/v2/apierror.APIError]. + + if err != nil { + var ae *apierror.APIError + if errors.As(err, &ae) { + log.Println(ae.Reason()) + log.Println(ae.Details().Help.GetLinks()) + } + } + +If the gRPC transport was used, the [google.golang.org/grpc/status.Status] can +still be parsed using the [google.golang.org/grpc/status.FromError] function. 
+ + if err != nil { + if s, ok := status.FromError(err); ok { + log.Println(s.Message()) + for _, d := range s.Proto().Details { + log.Println(d) + } + } + } -Inspecting errors +If the REST transport was used, the [google.golang.org/api/googleapi.Error] can +be parsed in a similar way, allowing access to details such as the HTTP response +code. -Most of the errors returned by the generated clients can be converted into a -`grpc.Status`. Converting your errors to this type can be a useful to get -more information about what went wrong while debugging. - if err != { - if s, ok := status.FromError(err); ok { - log.Println(s.Message()) - for _, d := range s.Proto().Details { - log.Println(d) + if err != nil { + var gerr *googleapi.Error + if errors.As(err, &gerr) { + log.Println(gerr.Message) } } - } -Client Stability +# Client Stability Clients in this repository are considered alpha or beta unless otherwise marked as stable in the README.md. Semver is not used to communicate stability diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index deca87dea5bc5..ced217827b0d5 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,5 +1,61 @@ # Changes +## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05) + + +### Features + +* **iam:** Start generating and refresh some libraries ([#7089](https://github.com/googleapis/google-cloud-go/issues/7089)) ([a9045ff](https://github.com/googleapis/google-cloud-go/commit/a9045ff191a711089c37f1d94a63522d9939ce38)) + +## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03) + + +### Features + +* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25) + + +### Features + +* **iam:** start generating stubs dir ([de2d180](https://github.com/googleapis/google-cloud-go/commit/de2d18066dc613b72f6f8db93ca60146dabcfdcc)) + +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28) + + +### Features + +* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06) + + +### Features + +* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50)) + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23) + + +### Features + +* **iam:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) + +## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.1.1...iam/v0.2.0) (2022-02-14) + + +### Features + +* **iam:** add file for tracking version ([17b36ea](https://github.com/googleapis/google-cloud-go/commit/17b36ead42a96b1a01105122074e65164357519e)) + +### [0.1.1](https://www.github.com/googleapis/google-cloud-go/compare/iam/v0.1.0...iam/v0.1.1) (2022-01-14) + + +### Bug Fixes + +* **iam:** run formatter 
([#5277](https://www.github.com/googleapis/google-cloud-go/issues/5277)) ([8682e4e](https://www.github.com/googleapis/google-cloud-go/commit/8682e4ed57a4428a659fbc225f56c91767e2a4a9)) + ## v0.1.0 This is the first tag to carve out iam as its own module. See diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index 6fbf54f448954..2793098aabcf5 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.5 // source: google/iam/v1/iam_policy.proto -package iam +package iampb import ( context "context" diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index abea46d9bc122..835f217199802 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.5 // source: google/iam/v1/options.proto -package iam +package iampb import ( reflect "reflect" diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go similarity index 88% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index bedd5f2433612..ec7777a7687d0 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.5 // source: google/iam/v1/policy.proto -package iam +package iampb import ( reflect "reflect" @@ -203,7 +203,6 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // An Identity and Access Management (IAM) policy, which specifies access // controls for Google Cloud resources. // -// // A `Policy` is a collection of `bindings`. A `binding` binds one or more // `members`, or principals, to a single `role`. Principals can be user // accounts, service accounts, Google groups, and domains (such as G Suite). 
A @@ -219,51 +218,51 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // // **JSON example:** // -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": [ -// "user:eve@example.com" -// ], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } +// { +// "bindings": [ +// { +// "role": "roles/resourcemanager.organizationAdmin", +// "members": [ +// "user:mike@example.com", +// "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" +// ] +// }, +// { +// "role": "roles/resourcemanager.organizationViewer", +// "members": [ +// "user:eve@example.com" +// ], +// "condition": { +// "title": "expirable access", +// "description": "Does not grant access after Sep 2020", +// "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", +// } +// } +// ], +// "etag": "BwWWja0YfJA=", +// "version": 3 +// } // // **YAML example:** // -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < timestamp('2020-10-01T00:00:00.000Z') -// etag: BwWWja0YfJA= -// version: 3 +// bindings: +// - members: +// - user:mike@example.com +// - group:admins@example.com +// - domain:google.com +// - serviceAccount:my-project-id@appspot.gserviceaccount.com +// role: roles/resourcemanager.organizationAdmin +// - members: +// - user:eve@example.com +// role: roles/resourcemanager.organizationViewer +// condition: +// title: expirable access +// description: Does not grant access after Sep 2020 +// expression: request.time < timestamp('2020-10-01T00:00:00.000Z') +// etag: BwWWja0YfJA= +// version: 3 // // For a description of IAM and its features, see the // [IAM documentation](https://cloud.google.com/iam/docs/). @@ -280,11 +279,11 @@ type Policy struct { // Any operation that affects conditional role bindings must specify version // `3`. This requirement applies to the following operations: // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a policy - // that includes conditions + // - Getting a policy that includes a conditional role binding + // - Adding a conditional role binding to a policy + // - Changing a conditional role binding in a policy + // - Removing any role binding, with or without a condition, from a policy + // that includes conditions // // **Important:** If you use IAM Conditions, you must include the `etag` field // whenever you call `setIamPolicy`. 
If you omit this field, then IAM allows @@ -397,47 +396,43 @@ type Binding struct { // Specifies the principals requesting access for a Cloud Platform resource. // `members` can have the following values: // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. - // - // * `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a service - // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // - `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. // - // * `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. + // - `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a user that has been recently deleted. For - // example, `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered user - // retains the role in the binding. + // - `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus - // unique identifier) representing a service account that has been recently - // deleted. For example, - // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account retains the - // role in the binding. + // - `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a Google group that has been recently - // deleted. For example, `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` and the - // recovered group retains the role in the binding. + // - `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. // + // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a user that has been recently deleted. For + // example, `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered user + // retains the role in the binding. // - // * `domain:{domain}`: The G Suite domain (primary) that represents all the - // users of that domain. For example, `google.com` or `example.com`. + // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a service account that has been recently + // deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. 
+ // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains the + // role in the binding. // + // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a Google group that has been recently + // deleted. For example, `admins@example.com?uid=123456789012345678901`. If + // the group is recovered, this value reverts to `group:{emailid}` and the + // recovered group retains the role in the binding. // + // - `domain:{domain}`: The G Suite domain (primary) that represents all the + // users of that domain. For example, `google.com` or `example.com`. Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` // The condition that is associated with this binding. // @@ -519,41 +514,41 @@ func (x *Binding) GetCondition() *expr.Expr { // // Example Policy with multiple AuditConfigs: // -// { -// "audit_configs": [ -// { -// "service": "allServices", -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE" -// }, -// { -// "log_type": "ADMIN_READ" -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com", -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ" -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } +// { +// "audit_configs": [ +// { +// "service": "allServices", +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:jose@example.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE" +// }, +// { +// "log_type": "ADMIN_READ" +// } +// ] +// }, +// { +// "service": "sampleservice.googleapis.com", +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ" +// }, +// { +// "log_type": "DATA_WRITE", +// "exempted_members": [ +// "user:aliya@example.com" +// ] +// } +// ] +// } +// ] +// } // // For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ // logging. It also exempts jose@example.com from DATA_READ logging, and @@ -620,19 +615,19 @@ func (x *AuditConfig) GetAuditLogConfigs() []*AuditLogConfig { // Provides the configuration for logging a type of permissions. // Example: // -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE" -// } -// ] -// } +// { +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:jose@example.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE" +// } +// ] +// } // // This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting // jose@example.com from DATA_READ logging. 
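(Editorial aside, not part of the vendored change.) The hunks above move the generated IAM messages from `google.golang.org/genproto/googleapis/iam/v1` (package `iam`) to `cloud.google.com/go/iam/apiv1/iampb` (package `iampb`); the message shapes themselves are unchanged, only the import path and package name differ. A sketch of constructing one of these messages after the rename, using placeholder role and member values taken from the doc comments above:

```go
package main

import (
	"fmt"

	"cloud.google.com/go/iam/apiv1/iampb"
)

func main() {
	policy := &iampb.Policy{
		// Version 1 is sufficient here; per the Policy doc comment above,
		// version 3 is required only when bindings carry conditions.
		Version: 1,
		Bindings: []*iampb.Binding{{
			Role:    "roles/resourcemanager.organizationViewer",
			Members: []string{"user:eve@example.com"},
		}},
	}
	fmt.Println(policy.GetBindings()[0].GetRole())
}
```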
diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index e458e7e6c1825..ac3f773f8dd66 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -6,7 +6,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accessapproval/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/accesscontextmanager/apiv1": { "distribution_name": "cloud.google.com/go/accesscontextmanager/apiv1", @@ -14,8 +14,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accesscontextmanager/latest/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/aiplatform/apiv1": { "distribution_name": "cloud.google.com/go/aiplatform/apiv1", @@ -24,7 +24,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/aiplatform/apiv1beta1": { + "distribution_name": "cloud.google.com/go/aiplatform/apiv1beta1", + "description": "Vertex AI API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/analytics/admin/apiv1alpha": { "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", @@ -33,7 +42,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/analytics/latest/admin/apiv1alpha", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apigateway/apiv1": { "distribution_name": "cloud.google.com/go/apigateway/apiv1", @@ -42,7 +51,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigateway/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apigeeconnect/apiv1": { "distribution_name": "cloud.google.com/go/apigeeconnect/apiv1", @@ -51,7 +60,25 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeconnect/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apigeeregistry/apiv1": { + "distribution_name": "cloud.google.com/go/apigeeregistry/apiv1", + "description": "Apigee Registry API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apikeys/apiv2": { + "distribution_name": "cloud.google.com/go/apikeys/apiv2", + "description": "API Keys API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apikeys/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/appengine/apiv1": { "distribution_name": 
"cloud.google.com/go/appengine/apiv1", @@ -60,7 +87,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/appengine/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/area120/tables/apiv1alpha1": { "distribution_name": "cloud.google.com/go/area120/tables/apiv1alpha1", @@ -69,7 +96,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/area120/latest/tables/apiv1alpha1", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/artifactregistry/apiv1": { + "distribution_name": "cloud.google.com/go/artifactregistry/apiv1", + "description": "Artifact Registry API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/artifactregistry/apiv1beta2": { "distribution_name": "cloud.google.com/go/artifactregistry/apiv1beta2", @@ -78,7 +114,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1beta2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/asset/apiv1": { "distribution_name": "cloud.google.com/go/asset/apiv1", @@ -87,7 +123,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/asset/apiv1p2beta1": { "distribution_name": "cloud.google.com/go/asset/apiv1p2beta1", @@ -96,7 +132,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p2beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/asset/apiv1p5beta1": { "distribution_name": "cloud.google.com/go/asset/apiv1p5beta1", @@ -105,7 +141,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p5beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/assuredworkloads/apiv1": { + "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1", + "description": "Assured Workloads API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/assuredworkloads/apiv1beta1": { "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1beta1", @@ -114,7 +159,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/automl/apiv1": { "distribution_name": "cloud.google.com/go/automl/apiv1", @@ -123,7 +168,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/automl/apiv1beta1": 
{ "distribution_name": "cloud.google.com/go/automl/apiv1beta1", @@ -132,7 +177,70 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/baremetalsolution/apiv2": { + "distribution_name": "cloud.google.com/go/baremetalsolution/apiv2", + "description": "Bare Metal Solution API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/baremetalsolution/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/batch/apiv1": { + "distribution_name": "cloud.google.com/go/batch/apiv1", + "description": "Batch API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/batch/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/appconnections/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/appconnections/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnections/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/appconnectors/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/appconnectors/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnectors/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/appgateways/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/appgateways/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appgateways/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientconnectorservices/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/clientgateways/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/clientgateways/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientgateways/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery": { "distribution_name": "cloud.google.com/go/bigquery", @@ -143,6 +251,15 @@ "release_level": "ga", "library_type": "GAPIC_MANUAL" }, + "cloud.google.com/go/bigquery/analyticshub/apiv1": { + "distribution_name": "cloud.google.com/go/bigquery/analyticshub/apiv1", + "description": "Analytics Hub API", + "language": "Go", + "client_library_type": "generated", + "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/analyticshub/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/bigquery/connection/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1", "description": "BigQuery Connection API", @@ -150,7 +267,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/connection/apiv1beta1": { "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1beta1", @@ -159,7 +276,25 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/dataexchange/apiv1beta1": { + "distribution_name": "cloud.google.com/go/bigquery/dataexchange/apiv1beta1", + "description": "Analytics Hub API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/dataexchange/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/datapolicies/apiv1beta1": { + "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1beta1", + "description": "BigQuery Data Policy API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/datatransfer/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1", @@ -168,7 +303,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datatransfer/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/migration/apiv2": { + "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2", + "description": "BigQuery Migration API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/migration/apiv2alpha": { "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2alpha", @@ -177,7 +321,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2alpha", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/reservation/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1", @@ -186,16 +330,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/reservation/apiv1", "release_level": "ga", - "library_type": "" - }, - "cloud.google.com/go/bigquery/reservation/apiv1beta1": { - "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1beta1", - "description": "BigQuery Reservation API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/reservation/apiv1beta1", - "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/storage/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1", @@ -204,7 +339,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/storage/apiv1beta1": { "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1", @@ -213,7 +348,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/storage/apiv1beta2": { "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta2", @@ -222,7 +357,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta2", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigtable": { "distribution_name": "cloud.google.com/go/bigtable", @@ -240,7 +375,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/billing/budgets/apiv1": { "distribution_name": "cloud.google.com/go/billing/budgets/apiv1", @@ -249,7 +384,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/billing/budgets/apiv1beta1": { "distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1", @@ -258,7 +393,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/binaryauthorization/apiv1": { + "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1", + "description": "Binary Authorization API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/binaryauthorization/apiv1beta1": { "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1beta1", @@ -267,7 +411,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/certificatemanager/apiv1": { + "distribution_name": "cloud.google.com/go/certificatemanager/apiv1", + "description": "Certificate Manager API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/certificatemanager/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, 
"cloud.google.com/go/channel/apiv1": { "distribution_name": "cloud.google.com/go/channel/apiv1", @@ -276,7 +429,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/channel/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudbuild/apiv1/v2": { "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2", @@ -285,7 +438,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv1/v2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/clouddms/apiv1": { "distribution_name": "cloud.google.com/go/clouddms/apiv1", @@ -294,7 +447,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/clouddms/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudtasks/apiv2": { "distribution_name": "cloud.google.com/go/cloudtasks/apiv2", @@ -303,7 +456,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudtasks/apiv2beta2": { "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta2", @@ -312,7 +465,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta2", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudtasks/apiv2beta3": { "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta3", @@ -321,23 +474,23 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta3", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/compute/apiv1": { "distribution_name": "cloud.google.com/go/compute/apiv1", "description": "Google Compute Engine API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/compute/apiv1", - "release_level": "beta", - "library_type": "" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/compute/metadata": { "distribution_name": "cloud.google.com/go/compute/metadata", "description": "Service Metadata API", "language": "Go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/compute/metadata", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/metadata", "release_level": "ga", "library_type": "CORE" }, @@ -347,8 +500,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/contactcenterinsights/latest/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/container/apiv1": { "distribution_name": "cloud.google.com/go/container/apiv1", @@ -357,7 +510,7 @@ "client_library_type": "generated", "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/container/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/containeranalysis/apiv1beta1": { "distribution_name": "cloud.google.com/go/containeranalysis/apiv1beta1", @@ -366,7 +519,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/containeranalysis/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datacatalog/apiv1": { "distribution_name": "cloud.google.com/go/datacatalog/apiv1", @@ -375,7 +528,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datacatalog/apiv1beta1": { "distribution_name": "cloud.google.com/go/datacatalog/apiv1beta1", @@ -384,7 +537,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataflow/apiv1beta3": { "distribution_name": "cloud.google.com/go/dataflow/apiv1beta3", @@ -393,7 +546,25 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataflow/latest/apiv1beta3", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dataform/apiv1alpha2": { + "distribution_name": "cloud.google.com/go/dataform/apiv1alpha2", + "description": "Dataform API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2", + "release_level": "alpha", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dataform/apiv1beta1": { + "distribution_name": "cloud.google.com/go/dataform/apiv1beta1", + "description": "Dataform API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datafusion/apiv1": { "distribution_name": "cloud.google.com/go/datafusion/apiv1", @@ -402,7 +573,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datafusion/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datalabeling/apiv1beta1": { "distribution_name": "cloud.google.com/go/datalabeling/apiv1beta1", @@ -411,7 +582,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datalabeling/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dataplex/apiv1": { + "distribution_name": "cloud.google.com/go/dataplex/apiv1", + "description": "Cloud Dataplex API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataplex/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataproc/apiv1": { "distribution_name": "cloud.google.com/go/dataproc/apiv1", @@ -420,7 +600,7 @@ "client_library_type": 
"generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataproc/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataqna/apiv1alpha": { "distribution_name": "cloud.google.com/go/dataqna/apiv1alpha", @@ -429,7 +609,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataqna/latest/apiv1alpha", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datastore": { "distribution_name": "cloud.google.com/go/datastore", @@ -447,7 +627,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest/admin/apiv1", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/datastream/apiv1": { + "distribution_name": "cloud.google.com/go/datastream/apiv1", + "description": "Datastream API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datastream/apiv1alpha1": { "distribution_name": "cloud.google.com/go/datastream/apiv1alpha1", @@ -456,7 +645,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1alpha1", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/debugger/apiv2": { "distribution_name": "cloud.google.com/go/debugger/apiv2", @@ -465,7 +654,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/debugger/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/deploy/apiv1": { "distribution_name": "cloud.google.com/go/deploy/apiv1", @@ -473,8 +662,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/deploy/latest/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/apiv2": { "distribution_name": "cloud.google.com/go/dialogflow/apiv2", @@ -483,7 +672,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dialogflow/apiv2beta1": { + "distribution_name": "cloud.google.com/go/dialogflow/apiv2beta1", + "description": "Dialogflow API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/cx/apiv3": { "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3", @@ -492,7 +690,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/cx/apiv3beta1": { "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3beta1", @@ -501,7 +699,7 @@ "client_library_type": "generated", 
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dlp/apiv2": { "distribution_name": "cloud.google.com/go/dlp/apiv2", @@ -510,7 +708,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dlp/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/documentai/apiv1": { "distribution_name": "cloud.google.com/go/documentai/apiv1", @@ -519,7 +717,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/documentai/apiv1beta3": { "distribution_name": "cloud.google.com/go/documentai/apiv1beta3", @@ -528,7 +726,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1beta3", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/domains/apiv1beta1": { "distribution_name": "cloud.google.com/go/domains/apiv1beta1", @@ -537,14 +735,23 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/domains/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/edgecontainer/apiv1": { + "distribution_name": "cloud.google.com/go/edgecontainer/apiv1", + "description": "Distributed Cloud Edge Container API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgecontainer/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/errorreporting": { "distribution_name": "cloud.google.com/go/errorreporting", "description": "Cloud Error Reporting API", "language": "Go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/errorreporting", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/errorreporting/latest", "release_level": "beta", "library_type": "GAPIC_MANUAL" }, @@ -555,7 +762,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/errorreporting/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/essentialcontacts/apiv1": { "distribution_name": "cloud.google.com/go/essentialcontacts/apiv1", @@ -564,7 +771,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/essentialcontacts/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/eventarc/apiv1": { "distribution_name": "cloud.google.com/go/eventarc/apiv1", @@ -573,7 +780,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/eventarc/publishing/apiv1": { + "distribution_name": "cloud.google.com/go/eventarc/publishing/apiv1", + "description": "Eventarc Publishing API", + "language": "Go", + 
"client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/publishing/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/filestore/apiv1": { "distribution_name": "cloud.google.com/go/filestore/apiv1", @@ -581,8 +797,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/filestore/latest/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/firestore": { "distribution_name": "cloud.google.com/go/firestore", @@ -600,7 +816,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/firestore/apiv1/admin": { "distribution_name": "cloud.google.com/go/firestore/apiv1/admin", @@ -609,7 +825,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1/admin", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/functions/apiv1": { "distribution_name": "cloud.google.com/go/functions/apiv1", @@ -618,14 +834,32 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/functions/apiv2": { + "distribution_name": "cloud.google.com/go/functions/apiv2", + "description": "Cloud Functions API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/functions/apiv2beta": { + "distribution_name": "cloud.google.com/go/functions/apiv2beta", + "description": "Cloud Functions API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2beta", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/functions/metadata": { "distribution_name": "cloud.google.com/go/functions/metadata", "description": "Cloud Functions", "language": "Go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/functions/metadata", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/metadata", "release_level": "alpha", "library_type": "CORE" }, @@ -636,7 +870,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gaming/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gaming/apiv1beta": { "distribution_name": "cloud.google.com/go/gaming/apiv1beta", @@ -645,7 +879,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gaming/latest/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/gkebackup/apiv1": { + "distribution_name": "cloud.google.com/go/gkebackup/apiv1", + "description": "Backup for GKE API", + "language": "Go", 
+ "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkebackup/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gkeconnect/gateway/apiv1beta1": { "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1beta1", @@ -654,7 +897,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkeconnect/latest/gateway/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gkehub/apiv1beta1": { "distribution_name": "cloud.google.com/go/gkehub/apiv1beta1", @@ -663,7 +906,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkehub/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/gkemulticloud/apiv1": { + "distribution_name": "cloud.google.com/go/gkemulticloud/apiv1", + "description": "Anthos Multi-Cloud API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gsuiteaddons/apiv1": { "distribution_name": "cloud.google.com/go/gsuiteaddons/apiv1", @@ -672,25 +924,34 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gsuiteaddons/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iam": { "distribution_name": "cloud.google.com/go/iam", "description": "Cloud IAM", "language": "Go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/iam", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest", "release_level": "ga", "library_type": "CORE" }, + "cloud.google.com/go/iam/apiv2": { + "distribution_name": "cloud.google.com/go/iam/apiv2", + "description": "Identity and Access Management (IAM) API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/iam/credentials/apiv1": { "distribution_name": "cloud.google.com/go/iam/credentials/apiv1", "description": "IAM Service Account Credentials API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/iam/credentials/apiv1", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/credentials/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iap/apiv1": { "distribution_name": "cloud.google.com/go/iap/apiv1", @@ -699,7 +960,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iap/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/ids/apiv1": { "distribution_name": "cloud.google.com/go/ids/apiv1", @@ -707,8 +968,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ids/latest/apiv1", - "release_level": "beta", - "library_type": 
"" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iot/apiv1": { "distribution_name": "cloud.google.com/go/iot/apiv1", @@ -717,7 +978,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iot/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/kms/apiv1": { "distribution_name": "cloud.google.com/go/kms/apiv1", @@ -726,7 +987,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/kms/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/language/apiv1": { "distribution_name": "cloud.google.com/go/language/apiv1", @@ -735,16 +996,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/language/apiv1beta2": { "distribution_name": "cloud.google.com/go/language/apiv1beta2", - "description": "Google Cloud Natural Language API", + "description": "Cloud Natural Language API", "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1beta2", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/lifesciences/apiv2beta": { "distribution_name": "cloud.google.com/go/lifesciences/apiv2beta", @@ -753,7 +1014,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/lifesciences/latest/apiv2beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/logging": { "distribution_name": "cloud.google.com/go/logging", @@ -771,16 +1032,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/longrunning/autogen": { "distribution_name": "cloud.google.com/go/longrunning/autogen", "description": "Long Running Operations API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/longrunning/autogen", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/longrunning/latest/autogen", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/managedidentities/apiv1": { "distribution_name": "cloud.google.com/go/managedidentities/apiv1", @@ -789,7 +1050,25 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedidentities/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/addressvalidation/apiv1": { + "distribution_name": "cloud.google.com/go/maps/addressvalidation/apiv1", + "description": "Address Validation API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/addressvalidation/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/routing/apiv2": { + "distribution_name": "cloud.google.com/go/maps/routing/apiv2", + 
"description": "Routes API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/routing/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/mediatranslation/apiv1beta1": { "distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1", @@ -798,7 +1077,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/mediatranslation/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/memcache/apiv1": { "distribution_name": "cloud.google.com/go/memcache/apiv1", @@ -807,7 +1086,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/memcache/apiv1beta2": { "distribution_name": "cloud.google.com/go/memcache/apiv1beta2", @@ -816,7 +1095,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1beta2", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/metastore/apiv1": { "distribution_name": "cloud.google.com/go/metastore/apiv1", @@ -825,7 +1104,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/metastore/apiv1alpha": { "distribution_name": "cloud.google.com/go/metastore/apiv1alpha", @@ -834,7 +1113,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1alpha", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/metastore/apiv1beta": { "distribution_name": "cloud.google.com/go/metastore/apiv1beta", @@ -843,7 +1122,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/monitoring/apiv3/v2": { "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", @@ -852,7 +1131,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/apiv3/v2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/monitoring/dashboard/apiv1": { "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1", @@ -861,7 +1140,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/dashboard/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/monitoring/metricsscope/apiv1": { "distribution_name": "cloud.google.com/go/monitoring/metricsscope/apiv1", @@ -870,7 +1149,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/metricsscope/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkconnectivity/apiv1": { "distribution_name": 
"cloud.google.com/go/networkconnectivity/apiv1", @@ -878,8 +1157,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkconnectivity/apiv1alpha1": { "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1alpha1", @@ -888,7 +1167,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1alpha1", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkmanagement/apiv1": { "distribution_name": "cloud.google.com/go/networkmanagement/apiv1", @@ -897,7 +1176,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkmanagement/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networksecurity/apiv1beta1": { "distribution_name": "cloud.google.com/go/networksecurity/apiv1beta1", @@ -906,7 +1185,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networksecurity/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/notebooks/apiv1": { + "distribution_name": "cloud.google.com/go/notebooks/apiv1", + "description": "Notebooks API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/notebooks/apiv1beta1": { "distribution_name": "cloud.google.com/go/notebooks/apiv1beta1", @@ -915,7 +1203,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/optimization/apiv1": { + "distribution_name": "cloud.google.com/go/optimization/apiv1", + "description": "Cloud Optimization API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/optimization/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/orchestration/airflow/service/apiv1": { "distribution_name": "cloud.google.com/go/orchestration/airflow/service/apiv1", @@ -923,8 +1220,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orchestration/latest/airflow/service/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/orgpolicy/apiv2": { "distribution_name": "cloud.google.com/go/orgpolicy/apiv2", @@ -933,7 +1230,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orgpolicy/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/agentendpoint/apiv1": { "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1", @@ -942,16 +1239,16 @@ "client_library_type": "generated", "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/agentendpoint/apiv1beta": { "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1beta", - "description": "Cloud OS Config API", + "description": "OS Config API", "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/apiv1": { "distribution_name": "cloud.google.com/go/osconfig/apiv1", @@ -960,7 +1257,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/apiv1alpha": { "distribution_name": "cloud.google.com/go/osconfig/apiv1alpha", @@ -969,16 +1266,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1alpha", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/apiv1beta": { "distribution_name": "cloud.google.com/go/osconfig/apiv1beta", - "description": "Cloud OS Config API", + "description": "OS Config API", "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/oslogin/apiv1": { "distribution_name": "cloud.google.com/go/oslogin/apiv1", @@ -987,7 +1284,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/oslogin/apiv1beta": { "distribution_name": "cloud.google.com/go/oslogin/apiv1beta", @@ -996,7 +1293,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/phishingprotection/apiv1beta1": { "distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1", @@ -1005,7 +1302,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/phishingprotection/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/policytroubleshooter/apiv1": { "distribution_name": "cloud.google.com/go/policytroubleshooter/apiv1", @@ -1014,7 +1311,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/privatecatalog/apiv1beta1": { "distribution_name": "cloud.google.com/go/privatecatalog/apiv1beta1", @@ -1023,14 +1320,14 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/privatecatalog/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, 
"cloud.google.com/go/profiler": { "distribution_name": "cloud.google.com/go/profiler", "description": "Cloud Profiler", "language": "Go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/profiler", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/profiler/latest", "release_level": "ga", "library_type": "AGENT" }, @@ -1050,7 +1347,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/pubsublite": { "distribution_name": "cloud.google.com/go/pubsublite", @@ -1058,7 +1355,7 @@ "language": "Go", "client_library_type": "manual", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/pubsublite/apiv1": { @@ -1068,25 +1365,25 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/recaptchaenterprise/apiv1": { - "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1", + "cloud.google.com/go/recaptchaenterprise/v2/apiv1": { + "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1", "description": "reCAPTCHA Enterprise API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/latest/apiv1", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/recaptchaenterprise/apiv1beta1": { - "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1beta1", + "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1": { + "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1", "description": "reCAPTCHA Enterprise API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/latest/apiv1beta1", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommendationengine/apiv1beta1": { "distribution_name": "cloud.google.com/go/recommendationengine/apiv1beta1", @@ -1095,7 +1392,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommendationengine/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommender/apiv1": { "distribution_name": "cloud.google.com/go/recommender/apiv1", @@ -1104,7 +1401,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommender/apiv1beta1": { "distribution_name": "cloud.google.com/go/recommender/apiv1beta1", @@ -1113,7 +1410,7 @@ "client_library_type": "generated", "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/redis/apiv1": { "distribution_name": "cloud.google.com/go/redis/apiv1", @@ -1122,7 +1419,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/redis/apiv1beta1": { "distribution_name": "cloud.google.com/go/redis/apiv1beta1", @@ -1131,7 +1428,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcemanager/apiv2": { "distribution_name": "cloud.google.com/go/resourcemanager/apiv2", @@ -1140,7 +1437,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcemanager/apiv3": { "distribution_name": "cloud.google.com/go/resourcemanager/apiv3", @@ -1149,7 +1446,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv3", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcesettings/apiv1": { "distribution_name": "cloud.google.com/go/resourcesettings/apiv1", @@ -1158,7 +1455,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcesettings/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/retail/apiv2": { "distribution_name": "cloud.google.com/go/retail/apiv2", @@ -1167,7 +1464,25 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/retail/apiv2alpha": { + "distribution_name": "cloud.google.com/go/retail/apiv2alpha", + "description": "Retail API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2alpha", + "release_level": "alpha", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/retail/apiv2beta": { + "distribution_name": "cloud.google.com/go/retail/apiv2beta", + "description": "Retail API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2beta", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/rpcreplay": { "distribution_name": "cloud.google.com/go/rpcreplay", @@ -1178,6 +1493,15 @@ "release_level": "ga", "library_type": "OTHER" }, + "cloud.google.com/go/run/apiv2": { + "distribution_name": "cloud.google.com/go/run/apiv2", + "description": "Cloud Run Admin API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/run/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/scheduler/apiv1": { "distribution_name": 
"cloud.google.com/go/scheduler/apiv1", "description": "Cloud Scheduler API", @@ -1185,7 +1509,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/scheduler/apiv1beta1": { "distribution_name": "cloud.google.com/go/scheduler/apiv1beta1", @@ -1194,7 +1518,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/secretmanager/apiv1": { "distribution_name": "cloud.google.com/go/secretmanager/apiv1", @@ -1203,16 +1527,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1", "release_level": "ga", - "library_type": "" - }, - "cloud.google.com/go/secretmanager/apiv1beta1": { - "distribution_name": "cloud.google.com/go/secretmanager/apiv1beta1", - "description": "Secret Manager API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1beta1", - "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/security/privateca/apiv1": { "distribution_name": "cloud.google.com/go/security/privateca/apiv1", @@ -1221,7 +1536,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/privateca/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/security/privateca/apiv1beta1": { "distribution_name": "cloud.google.com/go/security/privateca/apiv1beta1", @@ -1230,7 +1545,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/privateca/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/security/publicca/apiv1beta1": { + "distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1", + "description": "Public Certificate Authority API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/securitycenter/apiv1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1", @@ -1239,7 +1563,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/securitycenter/apiv1beta1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1beta1", @@ -1248,7 +1572,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/securitycenter/apiv1p1beta1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1p1beta1", @@ -1257,7 +1581,7 @@ "client_library_type": "generated", "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1p1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/securitycenter/settings/apiv1beta1": { "distribution_name": "cloud.google.com/go/securitycenter/settings/apiv1beta1", @@ -1266,7 +1590,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/settings/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicecontrol/apiv1": { "distribution_name": "cloud.google.com/go/servicecontrol/apiv1", @@ -1275,7 +1599,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicecontrol/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicedirectory/apiv1": { "distribution_name": "cloud.google.com/go/servicedirectory/apiv1", @@ -1284,7 +1608,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicedirectory/apiv1beta1": { "distribution_name": "cloud.google.com/go/servicedirectory/apiv1beta1", @@ -1293,7 +1617,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicemanagement/apiv1": { "distribution_name": "cloud.google.com/go/servicemanagement/apiv1", @@ -1302,7 +1626,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicemanagement/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/serviceusage/apiv1": { "distribution_name": "cloud.google.com/go/serviceusage/apiv1", @@ -1311,7 +1635,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/serviceusage/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/shell/apiv1": { "distribution_name": "cloud.google.com/go/shell/apiv1", @@ -1320,7 +1644,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shell/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/spanner": { "distribution_name": "cloud.google.com/go/spanner", @@ -1333,12 +1657,12 @@ }, "cloud.google.com/go/spanner/admin/database/apiv1": { "distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1", - "description": "Cloud Spanner Database Admin API", + "description": "Cloud Spanner API", "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/database/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/spanner/admin/instance/apiv1": { "distribution_name": "cloud.google.com/go/spanner/admin/instance/apiv1", @@ -1347,7 +1671,7 @@ "client_library_type": "generated", "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/instance/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/spanner/apiv1": { "distribution_name": "cloud.google.com/go/spanner/apiv1", @@ -1356,7 +1680,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/speech/apiv1": { "distribution_name": "cloud.google.com/go/speech/apiv1", @@ -1365,7 +1689,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/speech/apiv1p1beta1": { "distribution_name": "cloud.google.com/go/speech/apiv1p1beta1", @@ -1374,7 +1698,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1p1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/speech/apiv2": { + "distribution_name": "cloud.google.com/go/speech/apiv2", + "description": "Cloud Speech-to-Text API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/storage": { "distribution_name": "cloud.google.com/go/storage", @@ -1392,7 +1725,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/internal/apiv2", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/storagetransfer/apiv1": { "distribution_name": "cloud.google.com/go/storagetransfer/apiv1", @@ -1401,7 +1734,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storagetransfer/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/talent/apiv4": { "distribution_name": "cloud.google.com/go/talent/apiv4", @@ -1409,8 +1742,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/talent/apiv4beta1": { "distribution_name": "cloud.google.com/go/talent/apiv4beta1", @@ -1419,7 +1752,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/texttospeech/apiv1": { "distribution_name": "cloud.google.com/go/texttospeech/apiv1", @@ -1428,7 +1761,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/texttospeech/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/tpu/apiv1": { "distribution_name": "cloud.google.com/go/tpu/apiv1", @@ -1437,7 +1770,7 @@ "client_library_type": "generated", "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/tpu/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/trace/apiv1": { "distribution_name": "cloud.google.com/go/trace/apiv1", @@ -1446,7 +1779,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/trace/apiv2": { "distribution_name": "cloud.google.com/go/trace/apiv2", @@ -1455,7 +1788,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/translate/apiv3": { "distribution_name": "cloud.google.com/go/translate/apiv3", @@ -1464,25 +1797,34 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/translate/latest/apiv3", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/video/transcoder/apiv1": { - "distribution_name": "cloud.google.com/go/video/transcoder/apiv1", - "description": "Transcoder API", + "cloud.google.com/go/video/livestream/apiv1": { + "distribution_name": "cloud.google.com/go/video/livestream/apiv1", + "description": "Live Stream API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/transcoder/apiv1", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/livestream/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/video/transcoder/apiv1beta1": { - "distribution_name": "cloud.google.com/go/video/transcoder/apiv1beta1", + "cloud.google.com/go/video/stitcher/apiv1": { + "distribution_name": "cloud.google.com/go/video/stitcher/apiv1", + "description": "Video Stitcher API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/stitcher/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/video/transcoder/apiv1": { + "distribution_name": "cloud.google.com/go/video/transcoder/apiv1", "description": "Transcoder API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/transcoder/apiv1beta1", - "release_level": "beta", - "library_type": "" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/transcoder/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/videointelligence/apiv1": { "distribution_name": "cloud.google.com/go/videointelligence/apiv1", @@ -1491,7 +1833,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/videointelligence/apiv1beta2": { "distribution_name": "cloud.google.com/go/videointelligence/apiv1beta2", @@ -1500,25 +1842,34 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1beta2", "release_level": "beta", - 
"library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/videointelligence/apiv1p3beta1": { + "distribution_name": "cloud.google.com/go/videointelligence/apiv1p3beta1", + "description": "Cloud Video Intelligence API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1p3beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/vision/apiv1": { - "distribution_name": "cloud.google.com/go/vision/apiv1", + "cloud.google.com/go/vision/v2/apiv1": { + "distribution_name": "cloud.google.com/go/vision/v2/apiv1", "description": "Cloud Vision API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/latest/apiv1", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/vision/apiv1p1beta1": { - "distribution_name": "cloud.google.com/go/vision/apiv1p1beta1", + "cloud.google.com/go/vision/v2/apiv1p1beta1": { + "distribution_name": "cloud.google.com/go/vision/v2/apiv1p1beta1", "description": "Cloud Vision API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/latest/apiv1p1beta1", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1p1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vmmigration/apiv1": { "distribution_name": "cloud.google.com/go/vmmigration/apiv1", @@ -1526,8 +1877,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmmigration/latest/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vpcaccess/apiv1": { "distribution_name": "cloud.google.com/go/vpcaccess/apiv1", @@ -1536,7 +1887,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vpcaccess/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/webrisk/apiv1": { "distribution_name": "cloud.google.com/go/webrisk/apiv1", @@ -1545,7 +1896,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/webrisk/apiv1beta1": { "distribution_name": "cloud.google.com/go/webrisk/apiv1beta1", @@ -1554,7 +1905,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/websecurityscanner/apiv1": { "distribution_name": "cloud.google.com/go/websecurityscanner/apiv1", @@ -1563,7 +1914,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/websecurityscanner/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/workflows/apiv1": { + "distribution_name": "cloud.google.com/go/workflows/apiv1", + 
"description": "Workflows API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/apiv1beta": { "distribution_name": "cloud.google.com/go/workflows/apiv1beta", @@ -1572,7 +1932,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/executions/apiv1": { "distribution_name": "cloud.google.com/go/workflows/executions/apiv1", @@ -1581,7 +1941,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/executions/apiv1beta": { "distribution_name": "cloud.google.com/go/workflows/executions/apiv1beta", @@ -1590,6 +1950,6 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" } } diff --git a/vendor/cloud.google.com/go/internal/annotate.go b/vendor/cloud.google.com/go/internal/annotate.go index 6435695ba34b1..30d7bcf77ac63 100644 --- a/vendor/cloud.google.com/go/internal/annotate.go +++ b/vendor/cloud.google.com/go/internal/annotate.go @@ -31,7 +31,8 @@ import ( // - "google.golang.org/api/googleapi".Error // If the error is not one of these types, Annotate behaves // like -// fmt.Errorf("%s: %v", msg, err) +// +// fmt.Errorf("%s: %v", msg, err) func Annotate(err error, msg string) error { if err == nil { panic("Annotate called with nil") diff --git a/vendor/cloud.google.com/go/internal/pubsub/message.go b/vendor/cloud.google.com/go/internal/pubsub/message.go new file mode 100644 index 0000000000000..7d7092b191000 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/pubsub/message.go @@ -0,0 +1,221 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and + +package pubsub + +import ( + "context" + "time" +) + +// AckHandler implements ack/nack handling. +type AckHandler interface { + // OnAck processes a message ack. + OnAck() + + // OnNack processes a message nack. + OnNack() + + // OnAckWithResult processes a message ack and returns + // a result that shows if it succeeded. + OnAckWithResult() *AckResult + + // OnNackWithResult processes a message nack and returns + // a result that shows if it succeeded. + OnNackWithResult() *AckResult +} + +// Message represents a Pub/Sub message. +type Message struct { + // ID identifies this message. This ID is assigned by the server and is + // populated for Messages obtained from a subscription. + // + // This field is read-only. + ID string + + // Data is the actual data in the message. 
+ Data []byte + + // Attributes represents the key-value pairs the current message is + // labelled with. + Attributes map[string]string + + // PublishTime is the time at which the message was published. This is + // populated by the server for Messages obtained from a subscription. + // + // This field is read-only. + PublishTime time.Time + + // DeliveryAttempt is the number of times a message has been delivered. + // This is part of the dead lettering feature that forwards messages that + // fail to be processed (from nack/ack deadline timeout) to a dead letter topic. + // If dead lettering is enabled, this will be set on all attempts, starting + // with value 1. Otherwise, the value will be nil. + // This field is read-only. + DeliveryAttempt *int + + // OrderingKey identifies related messages for which publish order should + // be respected. If empty string is used, message will be sent unordered. + OrderingKey string + + // ackh handles Ack() or Nack(). + ackh AckHandler +} + +// Ack indicates successful processing of a Message passed to the Subscriber.Receive callback. +// It should not be called on any other Message value. +// If message acknowledgement fails, the Message will be redelivered. +// Client code must call Ack or Nack when finished for each received Message. +// Calls to Ack or Nack have no effect after the first call. +func (m *Message) Ack() { + if m.ackh != nil { + m.ackh.OnAck() + } +} + +// Nack indicates that the client will not or cannot process a Message passed to the Subscriber.Receive callback. +// It should not be called on any other Message value. +// Nack will result in the Message being redelivered more quickly than if it were allowed to expire. +// Client code must call Ack or Nack when finished for each received Message. +// Calls to Ack or Nack have no effect after the first call. +func (m *Message) Nack() { + if m.ackh != nil { + m.ackh.OnNack() + } +} + +// AcknowledgeStatus represents the status of an Ack or Nack request. +type AcknowledgeStatus int + +const ( + // AcknowledgeStatusSuccess indicates the request was a success. + AcknowledgeStatusSuccess AcknowledgeStatus = iota + // AcknowledgeStatusPermissionDenied indicates the caller does not have sufficient permissions. + AcknowledgeStatusPermissionDenied + // AcknowledgeStatusFailedPrecondition indicates the request encountered a FailedPrecondition error. + AcknowledgeStatusFailedPrecondition + // AcknowledgeStatusInvalidAckID indicates one or more of the ack IDs sent were invalid. + AcknowledgeStatusInvalidAckID + // AcknowledgeStatusOther indicates another unknown error was returned. + AcknowledgeStatusOther +) + +// AckResult holds the result from a call to Ack or Nack. +type AckResult struct { + ready chan struct{} + res AcknowledgeStatus + err error +} + +// Ready returns a channel that is closed when the result is ready. +// When the Ready channel is closed, Get is guaranteed not to block. +func (r *AckResult) Ready() <-chan struct{} { return r.ready } + +// Get returns the status and/or error result of a Ack, Nack, or Modack call. +// Get blocks until the Ack/Nack completes or the context is done. +func (r *AckResult) Get(ctx context.Context) (res AcknowledgeStatus, err error) { + // If the result is already ready, return it even if the context is done. + select { + case <-r.Ready(): + return r.res, r.err + default: + } + select { + case <-ctx.Done(): + // Explicitly return AcknowledgeStatusOther for context cancelled cases, + // since the default is success. 
+ return AcknowledgeStatusOther, ctx.Err() + case <-r.Ready(): + return r.res, r.err + } +} + +// NewAckResult creates a AckResult. +func NewAckResult() *AckResult { + return &AckResult{ + ready: make(chan struct{}), + } +} + +// SetAckResult sets the ack response and error for a ack result and closes +// the Ready channel. Any call after the first for the same AckResult +// is a no-op. +func SetAckResult(r *AckResult, res AcknowledgeStatus, err error) { + select { + case <-r.Ready(): + return + default: + r.res = res + r.err = err + close(r.ready) + } +} + +// AckWithResult acknowledges a message in Pub/Sub and it will not be +// delivered to this subscription again. +// +// You should avoid acknowledging messages until you have +// *finished* processing them, so that in the event of a failure, +// you receive the message again. +// +// If exactly-once delivery is enabled on the subscription, the +// AckResult returned by this method tracks the state of acknowledgement +// operation. If the operation completes successfully, the message is +// guaranteed NOT to be re-delivered. Otherwise, the result will +// contain an error with more details about the failure and the +// message may be re-delivered. +// +// If exactly-once delivery is NOT enabled on the subscription, or +// if using Pub/Sub Lite, AckResult readies immediately with a AcknowledgeStatus.Success. +// Since acks in Cloud Pub/Sub are best effort when exactly-once +// delivery is disabled, the message may be re-delivered. Because +// re-deliveries are possible, you should ensure that your processing +// code is idempotent, as you may receive any given message more than +// once. +func (m *Message) AckWithResult() *AckResult { + if m.ackh != nil { + return m.ackh.OnAckWithResult() + } + return nil +} + +// NackWithResult declines to acknowledge the message which indicates that +// the client will not or cannot process a Message. This will cause the message +// to be re-delivered to subscribers. Re-deliveries may take place immediately +// or after a delay. +// +// If exactly-once delivery is enabled on the subscription, the +// AckResult returned by this method tracks the state of nack +// operation. If the operation completes successfully, the result will +// contain AckResponse.Success. Otherwise, the result will contain an error +// with more details about the failure. +// +// If exactly-once delivery is NOT enabled on the subscription, or +// if using Pub/Sub Lite, AckResult readies immediately with a AcknowledgeStatus.Success. +func (m *Message) NackWithResult() *AckResult { + if m.ackh != nil { + return m.ackh.OnNackWithResult() + } + return nil +} + +// NewMessage creates a message with an AckHandler implementation, which should +// not be nil. +func NewMessage(ackh AckHandler) *Message { + return &Message{ackh: ackh} +} + +// MessageAckHandler provides access to the internal field Message.ackh. +func MessageAckHandler(m *Message) AckHandler { + return m.ackh +} diff --git a/vendor/cloud.google.com/go/internal/pubsub/publish.go b/vendor/cloud.google.com/go/internal/pubsub/publish.go new file mode 100644 index 0000000000000..d03ab0f7796fa --- /dev/null +++ b/vendor/cloud.google.com/go/internal/pubsub/publish.go @@ -0,0 +1,57 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and + +package pubsub + +import "context" + +// A PublishResult holds the result from a call to Publish. +type PublishResult struct { + ready chan struct{} + serverID string + err error +} + +// Ready returns a channel that is closed when the result is ready. +// When the Ready channel is closed, Get is guaranteed not to block. +func (r *PublishResult) Ready() <-chan struct{} { return r.ready } + +// Get returns the server-generated message ID and/or error result of a Publish call. +// Get blocks until the Publish call completes or the context is done. +func (r *PublishResult) Get(ctx context.Context) (serverID string, err error) { + // If the result is already ready, return it even if the context is done. + select { + case <-r.Ready(): + return r.serverID, r.err + default: + } + select { + case <-ctx.Done(): + return "", ctx.Err() + case <-r.Ready(): + return r.serverID, r.err + } +} + +// NewPublishResult creates a PublishResult. +func NewPublishResult() *PublishResult { + return &PublishResult{ready: make(chan struct{})} +} + +// SetPublishResult sets the server ID and error for a publish result and closes +// the Ready channel. +func SetPublishResult(r *PublishResult, sid string, err error) { + r.serverID = sid + r.err = err + close(r.ready) +} diff --git a/vendor/cloud.google.com/go/internal/testutil/context.go b/vendor/cloud.google.com/go/internal/testutil/context.go index edada10464b41..7ece9928a66d6 100644 --- a/vendor/cloud.google.com/go/internal/testutil/context.go +++ b/vendor/cloud.google.com/go/internal/testutil/context.go @@ -26,11 +26,13 @@ import ( "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" + "google.golang.org/api/impersonate" ) const ( - envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" - envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" + envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" + envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" + envImpersonate = "GCLOUD_TESTS_IMPERSONATE_CREDENTIALS" ) // ProjID returns the project ID to use in integration tests, or the empty @@ -52,6 +54,12 @@ func Credentials(ctx context.Context, scopes ...string) *google.Credentials { // will return nil. CredentialsEnv will log.Fatal if the token source is // specified but missing or invalid. func CredentialsEnv(ctx context.Context, envVar string, scopes ...string) *google.Credentials { + if impKey := os.Getenv(envImpersonate); impKey == "true" { + return &google.Credentials{ + TokenSource: impersonatedTokenSource(ctx, scopes), + ProjectID: "dulcet-port-762", + } + } key := os.Getenv(envVar) if key == "" { // Try for application default credentials. creds, err := google.FindDefaultCredentials(ctx, scopes...) @@ -88,6 +96,9 @@ func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource { // return nil. TokenSourceEnv will log.Fatal if the token source is specified but // missing or invalid. func TokenSourceEnv(ctx context.Context, envVar string, scopes ...string) oauth2.TokenSource { + if impKey := os.Getenv(envImpersonate); impKey == "true" { + return impersonatedTokenSource(ctx, scopes) + } key := os.Getenv(envVar) if key == "" { // Try for application default credentials. 
ts, err := google.DefaultTokenSource(ctx, scopes...) @@ -104,6 +115,17 @@ func TokenSourceEnv(ctx context.Context, envVar string, scopes ...string) oauth2 return conf.TokenSource(ctx) } +func impersonatedTokenSource(ctx context.Context, scopes []string) oauth2.TokenSource { + ts, err := impersonate.CredentialsTokenSource(ctx, impersonate.CredentialsConfig{ + TargetPrincipal: "kokoro@dulcet-port-762.iam.gserviceaccount.com", + Scopes: scopes, + }) + if err != nil { + log.Fatalf("Unable to impersonate credentials, exiting: %v", err) + } + return ts +} + // JWTConfig reads the JSON private key file whose name is in the default // environment variable, and returns the jwt.Config it contains. It ignores // scopes. diff --git a/vendor/cloud.google.com/go/longrunning/CHANGES.md b/vendor/cloud.google.com/go/longrunning/CHANGES.md new file mode 100644 index 0000000000000..3f9074b6e0455 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/CHANGES.md @@ -0,0 +1,12 @@ +# Changes + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.2.1...longrunning/v0.3.0) (2022-11-03) + + +### Features + +* **longrunning:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + +## v0.1.0 + +Initial release. diff --git a/vendor/cloud.google.com/go/longrunning/LICENSE b/vendor/cloud.google.com/go/longrunning/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/longrunning/README.md b/vendor/cloud.google.com/go/longrunning/README.md new file mode 100644 index 0000000000000..a07f3093fd38a --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/README.md @@ -0,0 +1,26 @@ +# longrunning + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/longrunning.svg)](https://pkg.go.dev/cloud.google.com/go/longrunning) + +A helper library for working with long running operations. + +## Install + +```bash +go get cloud.google.com/go/longrunning +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. 
+By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/vendor/cloud.google.com/go/longrunning/autogen/doc.go index 8d4f10e9f05f2..a3c2461ea3b2f 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/doc.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/doc.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,53 +17,64 @@ // Package longrunning is an auto-generated package for the // Long Running Operations API. // -// NOTE: This package is in alpha. It is not stable, and is likely to change. +// NOTE: This package is in alpha. It is not stable, and is likely to change. // -// Example usage +// # Example usage // // To get started with this package, create a client. -// ctx := context.Background() -// c, err := longrunning.NewOperationsClient(ctx) -// if err != nil { -// // TODO: Handle error. -// } -// defer c.Close() +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := longrunning.NewOperationsClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() // // The client will use your default application credentials. Clients should be reused instead of created as needed. // The methods of Client are safe for concurrent use by multiple goroutines. // The returned client must be Closed when it is done being used. // -// Using the Client +// # Using the Client // // The following is an example of making an API call with the newly created client. // -// ctx := context.Background() -// c, err := longrunning.NewOperationsClient(ctx) -// if err != nil { -// // TODO: Handle error. -// } -// defer c.Close() -// -// req := &longrunningpb.ListOperationsRequest{ -// // TODO: Fill request struct fields. -// // See https://pkg.go.dev/google.golang.org/genproto/googleapis/longrunning#ListOperationsRequest. -// } -// it := c.ListOperations(ctx, req) -// for { -// resp, err := it.Next() -// if err == iterator.Done { -// break -// } -// if err != nil { -// // TODO: Handle error. -// } -// // TODO: Use resp. -// _ = resp -// } -// -// Use of Context -// -// The ctx passed to NewClient is used for authentication requests and +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := longrunning.NewOperationsClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &longrunningpb.ListOperationsRequest{ +// // TODO: Fill request struct fields. 
+// // See https://pkg.go.dev/cloud.google.com/go/longrunning/autogen/longrunningpb#ListOperationsRequest. +// } +// it := c.ListOperations(ctx, req) +// for { +// resp, err := it.Next() +// if err == iterator.Done { +// break +// } +// if err != nil { +// // TODO: Handle error. +// } +// // TODO: Use resp. +// _ = resp +// } +// +// # Use of Context +// +// The ctx passed to NewOperationsClient is used for authentication requests and // for creating the underlying connection, but is not used for subsequent calls. // Individual methods on the client use the ctx given to them. // @@ -75,6 +86,8 @@ package longrunning // import "cloud.google.com/go/longrunning/autogen" import ( "context" + "fmt" + "net/http" "os" "runtime" "strconv" @@ -90,7 +103,14 @@ import ( type clientHookParams struct{} type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) -const versionClient = "20211208" +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { out, _ := metadata.FromOutgoingContext(ctx) @@ -115,7 +135,9 @@ func checkDisableDeadlines() (bool, error) { // DefaultAuthScopes reports the default set of authentication scopes to use with this package. func DefaultAuthScopes() []string { - return []string{} + return []string{ + "", + } } // versionGo returns the Go runtime version. The returned string @@ -154,3 +176,22 @@ func versionGo() string { } return "UNKNOWN" } + +// maybeUnknownEnum wraps the given proto-JSON parsing error if it is the result +// of receiving an unknown enum value. +func maybeUnknownEnum(err error) error { + if strings.Contains(err.Error(), "invalid value for enum type") { + err = fmt.Errorf("received an unknown enum value; a later version of the library may support it: %w", err) + } + return err +} + +// buildHeaders extracts metadata from the outgoing context, joins it with any other +// given metadata, and converts them into a http.Header. +func buildHeaders(ctx context.Context, mds ...metadata.MD) http.Header { + if cmd, ok := metadata.FromOutgoingContext(ctx); ok { + mds = append(mds, cmd) + } + md := metadata.Join(mds...) 
+ return http.Header(md) +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json b/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json index eff1271b207c5..5271428216654 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json +++ b/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json @@ -36,6 +36,36 @@ ] } } + }, + "rest": { + "libraryClient": "OperationsClient", + "rpcs": { + "CancelOperation": { + "methods": [ + "CancelOperation" + ] + }, + "DeleteOperation": { + "methods": [ + "DeleteOperation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "ListOperations": { + "methods": [ + "ListOperations" + ] + }, + "WaitOperation": { + "methods": [ + "WaitOperation" + ] + } + } } } } diff --git a/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go b/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go rename to vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go index 44b30dca5490f..6f3ae542be889 100644 --- a/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.5 // source: google/longrunning/operations.proto -package longrunning +package longrunningpb import ( context "context" @@ -70,6 +70,7 @@ type Operation struct { // If `done` == `true`, exactly one of `error` or `response` is set. // // Types that are assignable to Result: + // // *Operation_Error // *Operation_Response Result isOperation_Result `protobuf_oneof:"result"` @@ -519,13 +520,13 @@ func (x *WaitOperationRequest) GetTimeout() *durationpb.Duration { // // Example: // -// rpc LongRunningRecognize(LongRunningRecognizeRequest) -// returns (google.longrunning.Operation) { -// option (google.longrunning.operation_info) = { -// response_type: "LongRunningRecognizeResponse" -// metadata_type: "LongRunningRecognizeMetadata" -// }; -// } +// rpc LongRunningRecognize(LongRunningRecognizeRequest) +// returns (google.longrunning.Operation) { +// option (google.longrunning.operation_info) = { +// response_type: "LongRunningRecognizeResponse" +// metadata_type: "LongRunningRecognizeMetadata" +// }; +// } type OperationInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go index b90b54cce4e87..a04f1e341f43c 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
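The hunks below introduce a REST transport for the operations client (`NewOperationsRESTClient`) alongside the existing gRPC constructor, with per-method retry options keyed on HTTP 503. As a rough sketch of how a caller might poll a long-running operation against this surface (the operation name and poll interval are hypothetical, not taken from this change):

```go
// Sketch: poll an operation with the REST operations client (hypothetical name).
package main

import (
	"context"
	"fmt"
	"time"

	longrunning "cloud.google.com/go/longrunning/autogen"
	longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
)

func pollOperation(ctx context.Context, name string) (*longrunningpb.Operation, error) {
	// NewOperationsRESTClient is the HTTP/JSON constructor added in this update;
	// NewOperationsClient remains the gRPC equivalent.
	c, err := longrunning.NewOperationsRESTClient(ctx)
	if err != nil {
		return nil, err
	}
	defer c.Close()

	for {
		op, err := c.GetOperation(ctx, &longrunningpb.GetOperationRequest{Name: name})
		if err != nil {
			return nil, err
		}
		if op.GetDone() {
			return op, nil
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(5 * time.Second): // arbitrary poll interval
		}
	}
}

func main() {
	op, err := pollOperation(context.Background(), "operations/example") // hypothetical
	if err != nil {
		// TODO: Handle error.
		return
	}
	fmt.Println("operation finished:", op.GetName())
}
```

Transient 503s are retried inside each call by the default REST call options shown below, so a polling loop like this only needs to handle terminal errors.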
@@ -17,21 +17,27 @@ package longrunning import ( + "bytes" "context" "fmt" + "io/ioutil" "math" + "net/http" "net/url" "time" + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" - longrunningpb "google.golang.org/genproto/googleapis/longrunning" + httptransport "google.golang.org/api/transport/http" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" ) @@ -108,7 +114,53 @@ func defaultOperationsCallOptions() *OperationsCallOptions { } } -// internalOperationsClient is an interface that defines the methods availaible from Long Running Operations API. +func defaultOperationsRESTCallOptions() *OperationsCallOptions { + return &OperationsCallOptions{ + ListOperations: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + GetOperation: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + DeleteOperation: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + CancelOperation: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + WaitOperation: []gax.CallOption{}, + } +} + +// internalOperationsClient is an interface that defines the methods available from Long Running Operations API. type internalOperationsClient interface { Close() error setGoogleClientInfo(...string) @@ -157,7 +209,8 @@ func (c *OperationsClient) setGoogleClientInfo(keyval ...string) { // Connection returns a connection to the API service. // -// Deprecated. +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. func (c *OperationsClient) Connection() *grpc.ClientConn { return c.internalClient.Connection() } @@ -286,7 +339,8 @@ func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*Ope // Connection returns a connection to the API service. // -// Deprecated. +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. func (c *operationsGRPCClient) Connection() *grpc.ClientConn { return c.connPool.Conn() } @@ -296,7 +350,7 @@ func (c *operationsGRPCClient) Connection() *grpc.ClientConn { // use by Google-written clients. func (c *operationsGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", versionGo()}, keyval...) 
- kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } @@ -306,8 +360,85 @@ func (c *operationsGRPCClient) Close() error { return c.connPool.Close() } +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type operationsRESTClient struct { + // The http endpoint to connect to. + endpoint string + + // The http client. + httpClient *http.Client + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD + + // Points back to the CallOptions field of the containing OperationsClient + CallOptions **OperationsCallOptions +} + +// NewOperationsRESTClient creates a new operations rest client. +// +// Manages long-running operations with an API service. +// +// When an API method normally takes long time to complete, it can be designed +// to return Operation to the client, and the client can use this +// interface to receive the real response asynchronously by polling the +// operation resource, or pass the operation resource to another API (such as +// Google Cloud Pub/Sub API) to receive the response. Any API service that +// returns long-running operations should implement the Operations interface +// so developers can have a consistent client experience. +func NewOperationsRESTClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) { + clientOpts := append(defaultOperationsRESTClientOptions(), opts...) + httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + callOpts := defaultOperationsRESTCallOptions() + c := &operationsRESTClient{ + endpoint: endpoint, + httpClient: httpClient, + CallOptions: &callOpts, + } + c.setGoogleClientInfo() + + return &OperationsClient{internalClient: c, CallOptions: callOpts}, nil +} + +func defaultOperationsRESTClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("https://longrunning.googleapis.com"), + internaloption.WithDefaultMTLSEndpoint("https://longrunning.mtls.googleapis.com"), + internaloption.WithDefaultAudience("https://longrunning.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + } +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *operationsRESTClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *operationsRESTClient) Close() error { + // Replace httpClient with nil to force cleanup. + c.httpClient = nil + return nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: This method always returns nil. 
+func (c *operationsRESTClient) Connection() *grpc.ClientConn { + return nil +} func (c *operationsGRPCClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).ListOperations[0:len((*c.CallOptions).ListOperations):len((*c.CallOptions).ListOperations)], opts...) it := &OperationIterator{} @@ -357,6 +488,7 @@ func (c *operationsGRPCClient) GetOperation(ctx context.Context, req *longrunnin ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) var resp *longrunningpb.Operation @@ -378,6 +510,7 @@ func (c *operationsGRPCClient) DeleteOperation(ctx context.Context, req *longrun ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).DeleteOperation[0:len((*c.CallOptions).DeleteOperation):len((*c.CallOptions).DeleteOperation)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -395,6 +528,7 @@ func (c *operationsGRPCClient) CancelOperation(ctx context.Context, req *longrun ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).CancelOperation[0:len((*c.CallOptions).CancelOperation):len((*c.CallOptions).CancelOperation)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -420,6 +554,321 @@ func (c *operationsGRPCClient) WaitOperation(ctx context.Context, req *longrunni return resp, nil } +// ListOperations lists operations that match the specified filter in the request. If the +// server doesn’t support this method, it returns UNIMPLEMENTED. +// +// NOTE: the name binding allows API services to override the binding +// to use different resource name schemes, such as users/*/operations. To +// override the binding, API services can add a binding such as +// "/v1/{name=users/*}/operations" to their service configuration. +// For backwards compatibility, the default name includes the operations +// collection id, however overriding users must ensure the name binding +// is the parent resource, without the operations collection id. 
+func (c *operationsRESTClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { + it := &OperationIterator{} + req = proto.Clone(req).(*longrunningpb.ListOperationsRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) { + resp := &longrunningpb.ListOperationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + headers := buildHeaders(ctx, c.xGoogMetadata, metadata.Pairs("Content-Type", "application/json")) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := ioutil.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return maybeUnknownEnum(err) + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetOperations(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// GetOperation gets the latest state of a long-running operation. Clients can use this +// method to poll the operation result at intervals as recommended by the API +// service. +func (c *operationsRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + // Build HTTP headers from client and context metadata. + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &longrunningpb.Operation{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := ioutil.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return maybeUnknownEnum(err) + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// DeleteOperation deletes a long-running operation. This method indicates that the client is +// no longer interested in the operation result. It does not cancel the +// operation. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +func (c *operationsRESTClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + // Build HTTP headers from client and context metadata. + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + // Returns nil if there is no error, otherwise wraps + // the response code and body into a non-nil error + return googleapi.CheckResponse(httpRsp) + }, opts...) +} + +// CancelOperation starts asynchronous cancellation on a long-running operation. The server +// makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. Clients can use +// Operations.GetOperation or +// other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, +// the operation is not deleted; instead, it becomes an operation with +// an Operation.error value with a google.rpc.Status.code of 1, +// corresponding to Code.CANCELLED. +func (c *operationsRESTClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:cancel", req.GetName()) + + // Build HTTP headers from client and context metadata. 
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + // Returns nil if there is no error, otherwise wraps + // the response code and body into a non-nil error + return googleapi.CheckResponse(httpRsp) + }, opts...) +} + +// WaitOperation waits until the specified long-running operation is done or reaches at most +// a specified timeout, returning the latest state. If the operation is +// already done, the latest state is immediately returned. If the timeout +// specified is greater than the default HTTP/RPC timeout, the HTTP/RPC +// timeout is used. If the server does not support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +// Note that this method is on a best-effort basis. It may return the latest +// state before the specified timeout (including immediately), meaning even an +// immediate response is no guarantee that the operation is done. +func (c *operationsRESTClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("") + + params := url.Values{} + if req.GetName() != "" { + params.Add("name", fmt.Sprintf("%v", req.GetName())) + } + if req.GetTimeout() != nil { + timeout, err := protojson.Marshal(req.GetTimeout()) + if err != nil { + return nil, err + } + params.Add("timeout", string(timeout)) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + headers := buildHeaders(ctx, c.xGoogMetadata, metadata.Pairs("Content-Type", "application/json")) + opts = append((*c.CallOptions).WaitOperation[0:len((*c.CallOptions).WaitOperation):len((*c.CallOptions).WaitOperation)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &longrunningpb.Operation{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := ioutil.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return maybeUnknownEnum(err) + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + // OperationIterator manages a stream of *longrunningpb.Operation. 
type OperationIterator struct { items []*longrunningpb.Operation diff --git a/vendor/cloud.google.com/go/iam/go_mod_tidy_hack.go b/vendor/cloud.google.com/go/longrunning/tidyfix.go similarity index 88% rename from vendor/cloud.google.com/go/iam/go_mod_tidy_hack.go rename to vendor/cloud.google.com/go/longrunning/tidyfix.go index fbdd65f60c086..d9a07f99e0da4 100644 --- a/vendor/cloud.google.com/go/iam/go_mod_tidy_hack.go +++ b/vendor/cloud.google.com/go/longrunning/tidyfix.go @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the cloud.google.com/go import, won't actually become part of +// This file, and the {{.RootMod}} import, won't actually become part of // the resultant binary. +//go:build modhack // +build modhack -package iam +package longrunning // Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository import _ "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/migration.md b/vendor/cloud.google.com/go/migration.md new file mode 100644 index 0000000000000..224dcfa139727 --- /dev/null +++ b/vendor/cloud.google.com/go/migration.md @@ -0,0 +1,50 @@ +# go-genproto to google-cloud-go message type migration + +The message types for all of our client libraries are being migrated from the +`google.golang.org/genproto` [module](https://pkg.go.dev/google.golang.org/genproto) +to their respective product specific module in this repository. For example +this asset request type that was once found in [genproto](https://pkg.go.dev/google.golang.org/genproto@v0.0.0-20220908141613-51c1cc9bc6d0/googleapis/cloud/asset/v1p5beta1#ListAssetsRequest) +can now be found in directly in the [asset module](https://pkg.go.dev/cloud.google.com/go/asset/apiv1p5beta1/assetpb#ListAssetsRequest). + +Although the type definitions have moved, aliases have been left in the old +genproto packages to ensure a smooth non-breaking transition. + +## How do I migrate to the new packages? + +The easiest option is to run a migration tool at the root of our project. It is +like `go fix`, but specifically for this migration. Before running the tool it +is best to make sure any modules that have the prefix of `cloud.google.com/go` +are up to date. To run the tool, do the following: + +```bash +go run cloud.google.com/go/internal/aliasfix/cmd/aliasfix@latest . +go mod tidy +``` + +The tool should only change up to one line in the import statement per file. +This can also be done by hand if you prefer. + +## Do I have to migrate? + +Yes if you wish to keep using the newest versions of our client libraries with +the newest features -- You should migrate by the start of 2023. Until then we +will keep updating the aliases in go-genproto weekly. If you have an existing +workload that uses these client libraries and does not need to update its +dependencies there is no action to take. All existing written code will continue +to work. + +## Why are these types being moved + +1. This change will help simplify dependency trees over time. +2. The types will now be in product specific modules that are versioned + independently with semver. This is especially a benefit for users that rely + on multiple clients in a single application. Because message types are no + longer mono-packaged users are less likely to run into intermediate + dependency conflicts when updating dependencies. +3. 
Having all these types in one repository will help us ensure that unintended + changes are caught before they would be released. + +## Have questions? + +Please reach out to us on our [issue tracker](https://github.com/googleapis/google-cloud-go/issues/new?assignees=&labels=genproto-migration&template=migration-issue.md&title=package%3A+migration+help) +if you have any questions or concerns. diff --git a/vendor/cloud.google.com/go/pubsub/CHANGES.md b/vendor/cloud.google.com/go/pubsub/CHANGES.md index 49de654c5bb46..b044ce3dbbe9e 100644 --- a/vendor/cloud.google.com/go/pubsub/CHANGES.md +++ b/vendor/cloud.google.com/go/pubsub/CHANGES.md @@ -1,5 +1,359 @@ # Changes +## [1.27.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.27.0...pubsub/v1.27.1) (2022-12-02) + + +### Bug Fixes + +* **pubsub:** downgrade some dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5)) + +## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.26.0...pubsub/v1.27.0) (2022-11-29) + + +### Features + +* **pubsub:** start generating proto stubs ([cf89415](https://github.com/googleapis/google-cloud-go/commit/cf894154e451a32b431fef2af3781a0d2d8080ff)) + +## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.25.1...pubsub/v1.26.0) (2022-10-24) + + +### Features + +* **pubsub:** Add support for snapshot labels ([#6835](https://github.com/googleapis/google-cloud-go/issues/6835)) ([c17851b](https://github.com/googleapis/google-cloud-go/commit/c17851b5c3d811cd3e6a28162f0e399bb31a1363)) + + +### Bug Fixes + +* **pubsub:** Remove unused AckResult map ([#6656](https://github.com/googleapis/google-cloud-go/issues/6656)) ([5f69002](https://github.com/googleapis/google-cloud-go/commit/5f690022551ac584e5c66af4324a17d7044a898d)) + + +### Documentation + +* **pubsub:** Fix comments on message for exactly once delivery ([#6878](https://github.com/googleapis/google-cloud-go/issues/6878)) ([a8109e2](https://github.com/googleapis/google-cloud-go/commit/a8109e2d3257d1698ce1b751618428ef25cbb859)), refs [#6877](https://github.com/googleapis/google-cloud-go/issues/6877) +* **pubsub:** Update streams section ([#6682](https://github.com/googleapis/google-cloud-go/issues/6682)) ([7b4e2b4](https://github.com/googleapis/google-cloud-go/commit/7b4e2b412058f965a9f9159231afe551a6f58a74)) +* **pubsub:** Update subscription retry policy defaults ([#6909](https://github.com/googleapis/google-cloud-go/issues/6909)) ([c5c2f8f](https://github.com/googleapis/google-cloud-go/commit/c5c2f8f7125034c611edf1d08ca35ece6554c454)), refs [#6903](https://github.com/googleapis/google-cloud-go/issues/6903) + +## [1.25.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.25.0...pubsub/v1.25.1) (2022-08-24) + + +### Bug Fixes + +* **pubsub:** up version of cloud.google.com/go ([#6558](https://github.com/googleapis/google-cloud-go/issues/6558)) ([be9dcfb](https://github.com/googleapis/google-cloud-go/commit/be9dcfbdfa5876a548eb3c60337c38e1d282bb88)), refs [#6555](https://github.com/googleapis/google-cloud-go/issues/6555) + +## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.24.0...pubsub/v1.25.0) (2022-08-23) + + +### Features + +* **pubsub:** support exactly once delivery ([#6506](https://github.com/googleapis/google-cloud-go/issues/6506)) ([74da335](https://github.com/googleapis/google-cloud-go/commit/74da335fea6cd70b27808507f2e58ae53f5f4910)) + + +### Documentation + +* **pubsub:** typo 
([#6453](https://github.com/googleapis/google-cloud-go/issues/6453)) ([34d839e](https://github.com/googleapis/google-cloud-go/commit/34d839ec546633a0fb7f73448337ac8d8c796acd)) + +## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.23.1...pubsub/v1.24.0) (2022-07-18) + + +### Features + +* **pubsub/pstest:** subscription message ordering ([#6257](https://github.com/googleapis/google-cloud-go/issues/6257)) ([71bd273](https://github.com/googleapis/google-cloud-go/commit/71bd273b8a77ed22c41a1284813ee59eb6820bda)) + + +### Bug Fixes + +* **pubsub:** make receipt modack call async ([#6335](https://github.com/googleapis/google-cloud-go/issues/6335)) ([d12ca07](https://github.com/googleapis/google-cloud-go/commit/d12ca07720b6b29360e583c1eea22f001239952f)) + +## [1.23.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.23.0...pubsub/v1.23.1) (2022-06-30) + + +### Bug Fixes + +* **pubsub:** increase modack deadline RPC timeout ([#6289](https://github.com/googleapis/google-cloud-go/issues/6289)) ([d24600f](https://github.com/googleapis/google-cloud-go/commit/d24600fda7e574a388e8898c2ecc1958d07f4224)) + +## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.22.2...pubsub/v1.23.0) (2022-06-23) + + +### Features + +* **pubsub:** report publisher outstanding metrics ([#6187](https://github.com/googleapis/google-cloud-go/issues/6187)) ([cc1528b](https://github.com/googleapis/google-cloud-go/commit/cc1528b2bfebbb48d49bcacd639abf2cf3468c96)) +* **pubsub:** support bigquery subscriptions ([#6119](https://github.com/googleapis/google-cloud-go/issues/6119)) ([81f704a](https://github.com/googleapis/google-cloud-go/commit/81f704a2cdeece8f73d7c09eae730a905afdb870)) + +## [1.22.2](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.22.1...pubsub/v1.22.2) (2022-06-03) + + +### Bug Fixes + +* **pubsub:** fix iterator distribution bound calculations ([#6125](https://github.com/googleapis/google-cloud-go/issues/6125)) ([6c470ff](https://github.com/googleapis/google-cloud-go/commit/6c470ff02072d7af32ee07a772c5d0796b545a45)) + +## [1.22.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.22.0...pubsub/v1.22.1) (2022-06-02) + + +### Bug Fixes + +* **pubsub:** use MaxInt instead of MaxInt64 for BufferedByteLimit ([#6113](https://github.com/googleapis/google-cloud-go/issues/6113)) ([06721e0](https://github.com/googleapis/google-cloud-go/commit/06721e06a16f5c94a31b96809aad02f5eb38147c)) + +## [1.22.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.21.1...pubsub/v1.22.0) (2022-05-31) + + +### Features + +* **pubsub:** add BigQuery configuration for subscriptions ([6ef576e](https://github.com/googleapis/google-cloud-go/commit/6ef576e2d821d079e7b940cd5d49fe3ca64a7ba2)) +* **pubsub:** add min extension period ([#6041](https://github.com/googleapis/google-cloud-go/issues/6041)) ([f2407c7](https://github.com/googleapis/google-cloud-go/commit/f2407c7013bbfdfc0103296accc828b0be674f5d)) + + +### Bug Fixes + +* **pubsub:** disable deprecated BufferedByteLimit when using MaxOutstandingBytes ([#6009](https://github.com/googleapis/google-cloud-go/issues/6009)) ([dbfdf76](https://github.com/googleapis/google-cloud-go/commit/dbfdf762c77f9cfad637c573b06f0a49e01316f3)) + +### [1.21.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.21.0...pubsub/v1.21.1) (2022-05-04) + + +### Bug Fixes + +* **pubsub:** mark ignore option default for publish flow control ([#5983](https://github.com/googleapis/google-cloud-go/issues/5983)) 
([3f41531](https://github.com/googleapis/google-cloud-go/commit/3f41531579b7a55acea66fec8362e9134125c8a0)) + +## [1.21.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.20.0...pubsub/v1.21.0) (2022-04-26) + + +### Features + +* **pubsub:** deprecate synchronous mode ([#5910](https://github.com/googleapis/google-cloud-go/issues/5910)) ([bda5179](https://github.com/googleapis/google-cloud-go/commit/bda5179fa240b1468cd1043128493f634be28986)) + + +### Bug Fixes + +* **pubsub:** enable updating enable_exactly_once_delivery in fake pubsub ([#5940](https://github.com/googleapis/google-cloud-go/issues/5940)) ([ee44bf6](https://github.com/googleapis/google-cloud-go/commit/ee44bf646af1c38ed0943a997051b0225e22a6bf)) +* **pubsub:** nack messages properly with error from receive scheduler ([#5909](https://github.com/googleapis/google-cloud-go/issues/5909)) ([80edea4](https://github.com/googleapis/google-cloud-go/commit/80edea40dd722efb3c15cd3de3f24e0e7ad08ed7)) + +## [1.20.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.19.0...pubsub/v1.20.0) (2022-04-11) + + +### Features + +* **pubsub/pstest:** add topic retention support ([#4790](https://github.com/googleapis/google-cloud-go/issues/4790)) ([0a4ad6a](https://github.com/googleapis/google-cloud-go/commit/0a4ad6a72ddc379a94a88ec70ac678a227843cfd)) + + +### Bug Fixes + +* **pubsub:** ignore grpc errors in ack/modack ([#5796](https://github.com/googleapis/google-cloud-go/issues/5796)) ([4fb9aec](https://github.com/googleapis/google-cloud-go/commit/4fb9aecd2bc415c846e26eb960859e10e1af61f3)) + +## [1.19.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.18.0...pubsub/v1.19.0) (2022-03-07) + + +### Features + +* **pubsub:** add better version metadata to calls ([d1ad921](https://github.com/googleapis/google-cloud-go/commit/d1ad921d0322e7ce728ca9d255a3cf0437d26add)) +* **pubsub:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) + + +### Bug Fixes + +* **pubsub:** prevent infinite retry with publishing invalid utf-8 chars ([#5728](https://github.com/googleapis/google-cloud-go/issues/5728)) ([0a4dab9](https://github.com/googleapis/google-cloud-go/commit/0a4dab9043db81342dc41bd496d35fd4a7b08ad5)) +* **pubsub:** removing misspelled field, add correctly spelled field ([4a223de](https://github.com/googleapis/google-cloud-go/commit/4a223de8eab072d95818c761e41fb3f3f6ac728c)) + +## [1.18.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.17.1...pubsub/v1.18.0) (2022-02-08) + + +### Features + +* **pubsub:** add exactly once delivery flag ([f71dc3d](https://www.github.com/googleapis/google-cloud-go/commit/f71dc3dfefa54ab41861aea15971108850a9f98b)) +* **pubsub:** add exactly once delivery flag ([f71dc3d](https://www.github.com/googleapis/google-cloud-go/commit/f71dc3dfefa54ab41861aea15971108850a9f98b)) + + +### Bug Fixes + +* **pubsub:** add deadletter and retries handling in the fake pubsub ([#5320](https://www.github.com/googleapis/google-cloud-go/issues/5320)) ([116a610](https://www.github.com/googleapis/google-cloud-go/commit/116a61008e174e5d49b9485d78bc13f64461322f)) +* **pubsub:** pass context into checkOrdering to allow cancel ([#5316](https://www.github.com/googleapis/google-cloud-go/issues/5316)) ([fc08c49](https://www.github.com/googleapis/google-cloud-go/commit/fc08c49fc013cbad00642bbba317e02f0ba15a6d)) + +### 
[1.17.1](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.17.0...pubsub/v1.17.1) (2021-10-25) + + +### Bug Fixes + +* **pubsub:** add methods to allow retrieval of topic/sub config names ([#4953](https://www.github.com/googleapis/google-cloud-go/issues/4953)) ([bff5b1c](https://www.github.com/googleapis/google-cloud-go/commit/bff5b1ca331a0d193407a0f3eb501772cbb8ba78)) +* **pubsub:** prevent draining error return for Receive ([#4733](https://www.github.com/googleapis/google-cloud-go/issues/4733)) ([c6d5189](https://www.github.com/googleapis/google-cloud-go/commit/c6d51891649d8169089a0a2b7365ea54f991af56)) +* **pubsub:** tag ctx in iterator with subscription for opencensus ([#5011](https://www.github.com/googleapis/google-cloud-go/issues/5011)) ([cdf9588](https://www.github.com/googleapis/google-cloud-go/commit/cdf958864e278bb394cc548cb5f15ad08859f347)) + +## [1.17.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.16.0...pubsub/v1.17.0) (2021-09-08) + + +### Features + +* **pubsub:** add list configs for topic & sub ([#4607](https://www.github.com/googleapis/google-cloud-go/issues/4607)) ([a6550c5](https://www.github.com/googleapis/google-cloud-go/commit/a6550c5dfb381e286fea6a905dc658c4a865d643)) +* **pubsub:** add publisher flow control support ([#4292](https://www.github.com/googleapis/google-cloud-go/issues/4292)) ([bff24c3](https://www.github.com/googleapis/google-cloud-go/commit/bff24c3a62a2f037c1ccef14986f917e41953734)) + +## [1.16.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.15.0...pubsub/v1.16.0) (2021-08-24) + + +### Features + +* **pubsub:** add topic message retention duration ([#4520](https://www.github.com/googleapis/google-cloud-go/issues/4520)) ([0440336](https://www.github.com/googleapis/google-cloud-go/commit/0440336c988a4401cbdb5d85a8cc7fca388831e5)) + +## [1.15.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.14.0...pubsub/v1.15.0) (2021-08-13) + + +### Features + +* **pubsub:** Add topic retention options ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) + + +### Bug Fixes + +* **pubsub:** always make config check to prevent race ([#4606](https://www.github.com/googleapis/google-cloud-go/issues/4606)) ([8cfcf53](https://www.github.com/googleapis/google-cloud-go/commit/8cfcf53d03b9b442e7f0bc1c1b20c791e31c07b0)), refs [#3626](https://www.github.com/googleapis/google-cloud-go/issues/3626) +* **pubsub:** mitigate race in checking ordering config ([#4602](https://www.github.com/googleapis/google-cloud-go/issues/4602)) ([112eea2](https://www.github.com/googleapis/google-cloud-go/commit/112eea20b46bbc34e5f8f65b9812fb3e60107409)), refs [#3626](https://www.github.com/googleapis/google-cloud-go/issues/3626) +* **pubsub:** replace IAMPolicy in API config ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) + +## [1.14.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.13.0...pubsub/v1.14.0) (2021-08-09) + + +### Features + +* **pubsub:** expose CallOptions for pub/sub retries and timeouts ([#4428](https://www.github.com/googleapis/google-cloud-go/issues/4428)) ([8b99dd3](https://www.github.com/googleapis/google-cloud-go/commit/8b99dd356475a750000c06a44fc7b8423d703967)) + +## [1.13.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.12.2...pubsub/v1.13.0) (2021-07-20) + + +### Features + +* **pubsub/pstest:** add ability to create a pstest 
server listening on ([#4459](https://www.github.com/googleapis/google-cloud-go/issues/4459)) ([f1b7c8b](https://www.github.com/googleapis/google-cloud-go/commit/f1b7c8b33bc135c6cb8f21cdec586b25d81ea214)) + +### [1.12.2](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.12.1...pubsub/v1.12.2) (2021-07-08) + + +### Bug Fixes + +* **pubsub:** retry all goaway errors ([#4384](https://www.github.com/googleapis/google-cloud-go/issues/4384)) ([1eae86f](https://www.github.com/googleapis/google-cloud-go/commit/1eae86f1882660d901b9fb0e8dab6f138a048dbb)), refs [#4257](https://www.github.com/googleapis/google-cloud-go/issues/4257) + +### [1.12.1](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.12.0...pubsub/v1.12.1) (2021-07-01) + + +### Bug Fixes + +* **pubsub:** retry GOAWAY errors ([#4313](https://www.github.com/googleapis/google-cloud-go/issues/4313)) ([7076fef](https://www.github.com/googleapis/google-cloud-go/commit/7076fef5fef81cce47dbfbab3d7257cc7d3776bc)) + +## [1.12.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.11.0...pubsub/v1.12.0) (2021-06-23) + + +### Features + +* **pubsub/pstest:** add channel to support user-defined publish responses ([#4251](https://www.github.com/googleapis/google-cloud-go/issues/4251)) ([e1304f4](https://www.github.com/googleapis/google-cloud-go/commit/e1304f435fed4a767f4a652f32f1386979ff794f)) + + +### Bug Fixes + +* **pubsub:** fix memory leak issue in publish scheduler ([#4282](https://www.github.com/googleapis/google-cloud-go/issues/4282)) ([22ffc18](https://www.github.com/googleapis/google-cloud-go/commit/22ffc18e522c0f943db57f8c943e7356067bedfd)) + +## [1.11.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.10.3...pubsub/v1.11.0) (2021-05-27) + + +### Features + +* **pubsub:** add flush method to topic ([#2863](https://www.github.com/googleapis/google-cloud-go/issues/2863)) ([825ddd6](https://www.github.com/googleapis/google-cloud-go/commit/825ddd692363eb2dd8cd253cc5976867e432f547)) + +### [1.10.3](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.10.2...pubsub/v1.10.3) (2021-04-23) + + +### Bug Fixes + +* **pubsub:** fix failing message storage policy tests ([#4003](https://www.github.com/googleapis/google-cloud-go/issues/4003)) ([8946158](https://www.github.com/googleapis/google-cloud-go/commit/8946158561e1599c164021364e7fcb2a4c4d2f3d)) +* **pubsub:** make config call permission error in Receive transparent ([#3985](https://www.github.com/googleapis/google-cloud-go/issues/3985)) ([a1614db](https://www.github.com/googleapis/google-cloud-go/commit/a1614db35a51d21c52bcba5e805071381d8f5133)) + +### [1.10.2](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.10.1...pubsub/v1.10.2) (2021-04-08) + + +### Bug Fixes + +* **pubsub:** respect subscription message ordering field in scheduler ([#3886](https://www.github.com/googleapis/google-cloud-go/issues/3886)) ([1fcc78a](https://www.github.com/googleapis/google-cloud-go/commit/1fcc78ac6ecb461c3bbede9667436614c9df1535)) +* **pubsub:** update quiescenceDur in failing e2e test ([#3780](https://www.github.com/googleapis/google-cloud-go/issues/3780)) ([97e6c69](https://www.github.com/googleapis/google-cloud-go/commit/97e6c696c39bf4cf49fa5ef51145cfcb2a1a5d71)) + +### [1.10.1](https://www.github.com/googleapis/google-cloud-go/compare/v1.10.0...v1.10.1) (2021-03-04) + + +### Bug Fixes + +* **pubsub:** hide context.Cancelled error in sync pull 
([#3752](https://www.github.com/googleapis/google-cloud-go/issues/3752)) ([f88bdc8](https://www.github.com/googleapis/google-cloud-go/commit/f88bdc85072e5ad511a907d98207ebf7d22e9df7)) + +## [1.10.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.9.1...v1.10.0) (2021-02-10) + + +### Features + +* **pubsub:** add opencensus metrics for outstanding messages/bytes ([#3690](https://www.github.com/googleapis/google-cloud-go/issues/3690)) ([4039b82](https://www.github.com/googleapis/google-cloud-go/commit/4039b82e95b3a8ba2322d1f4fe9e2c21b087a907)) + +### [1.9.1](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.9.0...v1.9.1) (2020-12-10) + +### Bug Fixes + +* **pubsub:** fix default stream ack deadline seconds ([#3430](https://www.github.com/googleapis/google-cloud-go/issues/3430)) ([a10263a](https://www.github.com/googleapis/google-cloud-go/commit/a10263adc2ec9483ecedd0bf0b028863342ea760)) +* **pubsub:** respect streamAckDeadlineSeconds with MaxExtensionPeriod ([#3367](https://www.github.com/googleapis/google-cloud-go/issues/3367)) ([45131b6](https://www.github.com/googleapis/google-cloud-go/commit/45131b6c526ded2964ffd067c4a5420d508f0b1a)) + +## [1.9.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.8.3...v1.9.0) (2020-12-03) + +### Features + +- **pubsub:** Enable server side flow control by default with the option to turn it off ([#3154](https://www.github.com/googleapis/google-cloud-go/issues/3154)) ([e392e61](https://www.github.com/googleapis/google-cloud-go/commit/e392e6157ee02a344528de63ab16baba61470b24)) + +### Refactor + +**NOTE**: Several changes were proposed for allowing `Message` and `PublishResult` to be used outside the library. However, the decision was made to only allow packages in `google-cloud-go` to access `NewMessage` and `NewPublishResult` (see #3351). + +- **pubsub:** Allow Message and PublishResult to be used outside the package ([#3200](https://www.github.com/googleapis/google-cloud-go/issues/3200)) ([581bf92](https://www.github.com/googleapis/google-cloud-go/commit/581bf92878dcb52ae8ea3633d4b3fcbb7054ff0f)) +- **pubsub:** Remove NewMessage and NewPublishResult ([#3232](https://www.github.com/googleapis/google-cloud-go/issues/3232)) ([a781a3a](https://www.github.com/googleapis/google-cloud-go/commit/a781a3ad0c626fc0a7aff0ce33b1ef0830ee2259)) + +### [1.8.3](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.8.2...v1.8.3) (2020-11-10) + +### Bug Fixes + +- **pubsub:** retry deadline exceeded errors in Acknowledge ([#3157](https://www.github.com/googleapis/google-cloud-go/issues/3157)) ([ae75b46](https://www.github.com/googleapis/google-cloud-go/commit/ae75b46033d9f14f41c1bde4b9646c93f8e2bbad)) + +## v1.8.2 + +- Fixes: + - fix(pubsub): track errors in published messages opencensus metric (#2970) + - fix(pubsub): do not propagate context deadline exceeded error (#3055) + +## v1.8.1 + +- Suppress connection is closing on error on subscriber close. (#2951) + +## v1.8.0 + +- Add status code to error injection in pstest. This is a BREAKING CHANGE. + +## v1.7.0 + +- Add reactor options to pstest server. (#2916) + +## v1.6.2 + +- Make message.Modacks thread safe in pstest. (#2755) +- Fix issue with closing publisher and subscriber client errors. (#2867) +- Fix updating subscription filtering/retry policy in pstest. (#2901) + +## v1.6.1 + +- Fix issue where EnableMessageOrdering wasn't being parsed properly to `SubscriptionConfig`. 
+ +## v1.6.0 + +- Fix issue where subscriber streams were limited because it was using a single grpc conn. + - As a side effect, publisher and subscriber grpc conns are no longer shared. +- Add fake time function in pstest. +- Add support for server side flow control. + +## v1.5.0 + +- Add support for subscription detachment. +- Add support for message filtering in subscriptions. +- Add support for RetryPolicy (server-side feature). +- Fix publish error path when ordering key is disabled. +- Fix panic on Topic.ResumePublish method. + +## v1.4.0 + +- Add support for upcoming ordering keys feature. + ## v1.3.1 - Fix bug with removing dead letter policy from a subscription diff --git a/vendor/cloud.google.com/go/pubsub/README.md b/vendor/cloud.google.com/go/pubsub/README.md index 59f4cf66d9d25..cc3d2dccb834b 100644 --- a/vendor/cloud.google.com/go/pubsub/README.md +++ b/vendor/cloud.google.com/go/pubsub/README.md @@ -1,9 +1,9 @@ -## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) +## Cloud Pub/Sub [![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/pubsub.svg)](https://pkg.go.dev/cloud.google.com/go/pubsub) - [About Cloud Pubsub](https://cloud.google.com/pubsub/) - [API documentation](https://cloud.google.com/pubsub/docs) -- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub) -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub) +- [Go client documentation](https://pkg.go.dev/cloud.google.com/go/pubsub) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/pubsub) ### Example Usage @@ -43,4 +43,4 @@ err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { if err != nil { log.Println(err) } -``` \ No newline at end of file +``` diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go index 378e9b72cc816..e7a00c124e118 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,28 +20,90 @@ // Provides reliable, many-to-many, asynchronous messaging between // applications. // -// Use of Context +// # Example usage // -// The ctx passed to NewClient is used for authentication requests and +// To get started with this package, create a client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := pubsub.NewSchemaClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// The client will use your default application credentials. Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +// The returned client must be Closed when it is done being used. +// +// # Using the Client +// +// The following is an example of making an API call with the newly created client. 
+// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := pubsub.NewSchemaClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &pubsubpb.CreateSchemaRequest{ +// // TODO: Fill request struct fields. +// // See https://pkg.go.dev/google.golang.org/genproto/googleapis/pubsub/v1#CreateSchemaRequest. +// } +// resp, err := c.CreateSchema(ctx, req) +// if err != nil { +// // TODO: Handle error. +// } +// // TODO: Use resp. +// _ = resp +// +// # Use of Context +// +// The ctx passed to NewSchemaClient is used for authentication requests and // for creating the underlying connection, but is not used for subsequent calls. // Individual methods on the client use the ctx given to them. // // To close the open connection, use the Close() method. // // For information about setting deadlines, reusing contexts, and more -// please visit godoc.org/cloud.google.com/go. +// please visit https://pkg.go.dev/cloud.google.com/go. package pubsub // import "cloud.google.com/go/pubsub/apiv1" import ( "context" + "os" "runtime" + "strconv" "strings" "unicode" + "google.golang.org/api/option" "google.golang.org/grpc/metadata" ) -const versionClient = "20200312" +// For more information on implementing a client constructor hook, see +// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. +type clientHookParams struct{} +type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) + +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { out, _ := metadata.FromOutgoingContext(ctx) @@ -54,6 +116,16 @@ func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { return metadata.NewOutgoingContext(ctx, out) } +func checkDisableDeadlines() (bool, error) { + raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") + if !ok { + return false, nil + } + + b, err := strconv.ParseBool(raw) + return b, err +} + // DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
func DefaultAuthScopes() []string { return []string{ diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/gapic_metadata.json b/vendor/cloud.google.com/go/pubsub/apiv1/gapic_metadata.json new file mode 100644 index 0000000000000..64b2999668a61 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/gapic_metadata.json @@ -0,0 +1,236 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", + "language": "go", + "protoPackage": "google.pubsub.v1", + "libraryPackage": "cloud.google.com/go/pubsub/apiv1", + "services": { + "Publisher": { + "clients": { + "grpc": { + "libraryClient": "PublisherClient", + "rpcs": { + "CreateTopic": { + "methods": [ + "CreateTopic" + ] + }, + "DeleteTopic": { + "methods": [ + "DeleteTopic" + ] + }, + "DetachSubscription": { + "methods": [ + "DetachSubscription" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetTopic": { + "methods": [ + "GetTopic" + ] + }, + "ListTopicSnapshots": { + "methods": [ + "ListTopicSnapshots" + ] + }, + "ListTopicSubscriptions": { + "methods": [ + "ListTopicSubscriptions" + ] + }, + "ListTopics": { + "methods": [ + "ListTopics" + ] + }, + "Publish": { + "methods": [ + "Publish" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateTopic": { + "methods": [ + "UpdateTopic" + ] + } + } + } + } + }, + "SchemaService": { + "clients": { + "grpc": { + "libraryClient": "SchemaClient", + "rpcs": { + "CreateSchema": { + "methods": [ + "CreateSchema" + ] + }, + "DeleteSchema": { + "methods": [ + "DeleteSchema" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetSchema": { + "methods": [ + "GetSchema" + ] + }, + "ListSchemas": { + "methods": [ + "ListSchemas" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "ValidateMessage": { + "methods": [ + "ValidateMessage" + ] + }, + "ValidateSchema": { + "methods": [ + "ValidateSchema" + ] + } + } + } + } + }, + "Subscriber": { + "clients": { + "grpc": { + "libraryClient": "SubscriberClient", + "rpcs": { + "Acknowledge": { + "methods": [ + "Acknowledge" + ] + }, + "CreateSnapshot": { + "methods": [ + "CreateSnapshot" + ] + }, + "CreateSubscription": { + "methods": [ + "CreateSubscription" + ] + }, + "DeleteSnapshot": { + "methods": [ + "DeleteSnapshot" + ] + }, + "DeleteSubscription": { + "methods": [ + "DeleteSubscription" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetSnapshot": { + "methods": [ + "GetSnapshot" + ] + }, + "GetSubscription": { + "methods": [ + "GetSubscription" + ] + }, + "ListSnapshots": { + "methods": [ + "ListSnapshots" + ] + }, + "ListSubscriptions": { + "methods": [ + "ListSubscriptions" + ] + }, + "ModifyAckDeadline": { + "methods": [ + "ModifyAckDeadline" + ] + }, + "ModifyPushConfig": { + "methods": [ + "ModifyPushConfig" + ] + }, + "Pull": { + "methods": [ + "Pull" + ] + }, + "Seek": { + "methods": [ + "Seek" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "StreamingPull": { + "methods": [ + "StreamingPull" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateSnapshot": { + "methods": [ + "UpdateSnapshot" + ] + }, + "UpdateSubscription": { + "methods": [ + "UpdateSubscription" + ] + } + } + } + } + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/info.go 
b/vendor/cloud.google.com/go/pubsub/apiv1/info.go new file mode 100644 index 0000000000000..fde99d8d0c534 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/info.go @@ -0,0 +1,33 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Also passes any +// provided key-value pairs. Intended for use by Google-written clients. +// +// Internal use only. +func (pc *PublisherClient) SetGoogleClientInfo(keyval ...string) { + pc.setGoogleClientInfo(keyval...) +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Also passes any +// provided key-value pairs. Intended for use by Google-written clients. +// +// Internal use only. +func (sc *SubscriberClient) SetGoogleClientInfo(keyval ...string) { + sc.setGoogleClientInfo(keyval...) +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go b/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go index b9ab4848db1b8..7416e376d18e0 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go @@ -17,7 +17,9 @@ package pubsub // PublisherProjectPath returns the path for the project resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s", project) +// +// fmt.Sprintf("projects/%s", project) +// // instead. func PublisherProjectPath(project string) string { return "" + @@ -29,7 +31,9 @@ func PublisherProjectPath(project string) string { // PublisherTopicPath returns the path for the topic resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s/topics/%s", project, topic) +// +// fmt.Sprintf("projects/%s/topics/%s", project, topic) +// // instead. func PublisherTopicPath(project, topic string) string { return "" + @@ -43,7 +47,9 @@ func PublisherTopicPath(project, topic string) string { // SubscriberProjectPath returns the path for the project resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s", project) +// +// fmt.Sprintf("projects/%s", project) +// // instead. func SubscriberProjectPath(project string) string { return "" + @@ -55,7 +61,9 @@ func SubscriberProjectPath(project string) string { // SubscriberSnapshotPath returns the path for the snapshot resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s/snapshots/%s", project, snapshot) +// +// fmt.Sprintf("projects/%s/snapshots/%s", project, snapshot) +// // instead. func SubscriberSnapshotPath(project, snapshot string) string { return "" + @@ -69,7 +77,9 @@ func SubscriberSnapshotPath(project, snapshot string) string { // SubscriberSubscriptionPath returns the path for the subscription resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription) +// +// fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription) +// // instead. 
func SubscriberSubscriptionPath(project, subscription string) string { return "" + @@ -83,7 +93,9 @@ func SubscriberSubscriptionPath(project, subscription string) string { // SubscriberTopicPath returns the path for the topic resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s/topics/%s", project, topic) +// +// fmt.Sprintf("projects/%s/topics/%s", project, topic) +// // instead. func SubscriberTopicPath(project, topic string) string { return "" + diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go index 96c392f826bcf..11b1298de1cc0 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,17 +23,21 @@ import ( "net/url" "time" - "github.com/golang/protobuf/proto" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" + iampb "google.golang.org/genproto/googleapis/iam/v1" pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" ) +var newPublisherClientHook clientHook + // PublisherCallOptions contains the retry settings for each method of PublisherClient. type PublisherCallOptions struct { CreateTopic []gax.CallOption @@ -44,13 +48,19 @@ type PublisherCallOptions struct { ListTopicSubscriptions []gax.CallOption ListTopicSnapshots []gax.CallOption DeleteTopic []gax.CallOption + DetachSubscription []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption } -func defaultPublisherClientOptions() []option.ClientOption { +func defaultPublisherGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ - option.WithEndpoint("pubsub.googleapis.com:443"), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), - option.WithScopes(DefaultAuthScopes()...), + internaloption.WithDefaultEndpoint("pubsub.googleapis.com:443"), + internaloption.WithDefaultMTLSEndpoint("pubsub.mtls.googleapis.com:443"), + internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -160,75 +170,258 @@ func defaultPublisherCallOptions() *PublisherCallOptions { }) }), }, + DetachSubscription: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, } } +// internalPublisherClient is an interface that defines the methods available from Cloud Pub/Sub API. 
+type internalPublisherClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateTopic(context.Context, *pubsubpb.Topic, ...gax.CallOption) (*pubsubpb.Topic, error) + UpdateTopic(context.Context, *pubsubpb.UpdateTopicRequest, ...gax.CallOption) (*pubsubpb.Topic, error) + Publish(context.Context, *pubsubpb.PublishRequest, ...gax.CallOption) (*pubsubpb.PublishResponse, error) + GetTopic(context.Context, *pubsubpb.GetTopicRequest, ...gax.CallOption) (*pubsubpb.Topic, error) + ListTopics(context.Context, *pubsubpb.ListTopicsRequest, ...gax.CallOption) *TopicIterator + ListTopicSubscriptions(context.Context, *pubsubpb.ListTopicSubscriptionsRequest, ...gax.CallOption) *StringIterator + ListTopicSnapshots(context.Context, *pubsubpb.ListTopicSnapshotsRequest, ...gax.CallOption) *StringIterator + DeleteTopic(context.Context, *pubsubpb.DeleteTopicRequest, ...gax.CallOption) error + DetachSubscription(context.Context, *pubsubpb.DetachSubscriptionRequest, ...gax.CallOption) (*pubsubpb.DetachSubscriptionResponse, error) + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) +} + // PublisherClient is a client for interacting with Cloud Pub/Sub API. -// // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The service that an application uses to manipulate topics, and to send +// messages to a topic. type PublisherClient struct { + // The internal transport-dependent client. + internalClient internalPublisherClient + + // The call options for this service. + CallOptions *PublisherCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *PublisherClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *PublisherClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *PublisherClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateTopic creates the given topic with the given name. See the [resource name rules] +// (https://cloud.google.com/pubsub/docs/admin#resource_names (at https://cloud.google.com/pubsub/docs/admin#resource_names)). +func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + return c.internalClient.CreateTopic(ctx, req, opts...) +} + +// UpdateTopic updates an existing topic. Note that certain properties of a +// topic are not modifiable. +func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + return c.internalClient.UpdateTopic(ctx, req, opts...) +} + +// Publish adds one or more messages to the topic. 
Returns NOT_FOUND if the topic +// does not exist. +func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) { + return c.internalClient.Publish(ctx, req, opts...) +} + +// GetTopic gets the configuration of a topic. +func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + return c.internalClient.GetTopic(ctx, req, opts...) +} + +// ListTopics lists matching topics. +func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator { + return c.internalClient.ListTopics(ctx, req, opts...) +} + +// ListTopicSubscriptions lists the names of the attached subscriptions on this topic. +func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator { + return c.internalClient.ListTopicSubscriptions(ctx, req, opts...) +} + +// ListTopicSnapshots lists the names of the snapshots on this topic. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) operations, +// which allow you to manage message acknowledgments in bulk. That is, you can +// set the acknowledgment state of messages in an existing subscription to the +// state captured by a snapshot. +func (c *PublisherClient) ListTopicSnapshots(ctx context.Context, req *pubsubpb.ListTopicSnapshotsRequest, opts ...gax.CallOption) *StringIterator { + return c.internalClient.ListTopicSnapshots(ctx, req, opts...) +} + +// DeleteTopic deletes the topic with the given name. Returns NOT_FOUND if the topic +// does not exist. After a topic is deleted, a new topic may be created with +// the same name; this is an entirely new topic with none of the old +// configuration or subscriptions. Existing subscriptions to this topic are +// not deleted, but their topic field is set to _deleted-topic_. +func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteTopic(ctx, req, opts...) +} + +// DetachSubscription detaches a subscription from this topic. All messages retained in the +// subscription are dropped. Subsequent Pull and StreamingPull requests +// will return FAILED_PRECONDITION. If the subscription is a push +// subscription, pushes to the endpoint will stop. +func (c *PublisherClient) DetachSubscription(ctx context.Context, req *pubsubpb.DetachSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.DetachSubscriptionResponse, error) { + return c.internalClient.DetachSubscription(ctx, req, opts...) +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *PublisherClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *PublisherClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) 
+} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. +func (c *PublisherClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// publisherGRPCClient is a client for interacting with Cloud Pub/Sub API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type publisherGRPCClient struct { // Connection pool of gRPC connections to the service. connPool gtransport.ConnPool + // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE + disableDeadlines bool + + // Points back to the CallOptions field of the containing PublisherClient + CallOptions **PublisherCallOptions + // The gRPC API client. publisherClient pubsubpb.PublisherClient - // The call options for this service. - CallOptions *PublisherCallOptions + iamPolicyClient iampb.IAMPolicyClient // The x-goog-* metadata to be sent with each request. xGoogMetadata metadata.MD } -// NewPublisherClient creates a new publisher client. +// NewPublisherClient creates a new publisher client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. // // The service that an application uses to manipulate topics, and to send // messages to a topic. func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*PublisherClient, error) { - connPool, err := gtransport.DialPool(ctx, append(defaultPublisherClientOptions(), opts...)...) + clientOpts := defaultPublisherGRPCClientOptions() + if newPublisherClientHook != nil { + hookOpts, err := newPublisherClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + disableDeadlines, err := checkDisableDeadlines() if err != nil { return nil, err } - c := &PublisherClient{ - connPool: connPool, - CallOptions: defaultPublisherCallOptions(), - publisherClient: pubsubpb.NewPublisherClient(connPool), + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err } - c.SetGoogleClientInfo() + client := PublisherClient{CallOptions: defaultPublisherCallOptions()} + + c := &publisherGRPCClient{ + connPool: connPool, + disableDeadlines: disableDeadlines, + publisherClient: pubsubpb.NewPublisherClient(connPool), + CallOptions: &client.CallOptions, + iamPolicyClient: iampb.NewIAMPolicyClient(connPool), + } + c.setGoogleClientInfo() + + client.internalClient = c - return c, nil + return &client, nil } // Connection returns a connection to the API service. // -// Deprecated. -func (c *PublisherClient) Connection() *grpc.ClientConn { +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *publisherGRPCClient) Connection() *grpc.ClientConn { return c.connPool.Conn() } -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. 
-func (c *PublisherClient) Close() error { - return c.connPool.Close() -} - -// SetGoogleClientInfo sets the name and version of the application in +// setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) { +func (c *publisherGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } -// CreateTopic creates the given topic with the given name. See the -// -// resource name rules (at https://cloud.google.com/pubsub/docs/admin#resource_names). -func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) { +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *publisherGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *publisherGRPCClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateTopic[0:len(c.CallOptions.CreateTopic):len(c.CallOptions.CreateTopic)], opts...) + opts = append((*c.CallOptions).CreateTopic[0:len((*c.CallOptions).CreateTopic):len((*c.CallOptions).CreateTopic)], opts...) var resp *pubsubpb.Topic err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -241,12 +434,16 @@ func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, return resp, nil } -// UpdateTopic updates an existing topic. Note that certain properties of a -// topic are not modifiable. -func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { +func (c *publisherGRPCClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic.name", url.QueryEscape(req.GetTopic().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateTopic[0:len(c.CallOptions.UpdateTopic):len(c.CallOptions.UpdateTopic)], opts...) + opts = append((*c.CallOptions).UpdateTopic[0:len((*c.CallOptions).UpdateTopic):len((*c.CallOptions).UpdateTopic)], opts...) var resp *pubsubpb.Topic err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -259,12 +456,16 @@ func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateT return resp, nil } -// Publish adds one or more messages to the topic. Returns NOT_FOUND if the topic -// does not exist. 
-func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) { +func (c *publisherGRPCClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...) + opts = append((*c.CallOptions).Publish[0:len((*c.CallOptions).Publish):len((*c.CallOptions).Publish)], opts...) var resp *pubsubpb.PublishResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -277,11 +478,16 @@ func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequ return resp, nil } -// GetTopic gets the configuration of a topic. -func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { +func (c *publisherGRPCClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetTopic[0:len(c.CallOptions.GetTopic):len(c.CallOptions.GetTopic)], opts...) + opts = append((*c.CallOptions).GetTopic[0:len((*c.CallOptions).GetTopic):len((*c.CallOptions).GetTopic)], opts...) var resp *pubsubpb.Topic err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -294,19 +500,21 @@ func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRe return resp, nil } -// ListTopics lists matching topics. -func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator { +func (c *publisherGRPCClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListTopics[0:len(c.CallOptions.ListTopics):len(c.CallOptions.ListTopics)], opts...) + opts = append((*c.CallOptions).ListTopics[0:len((*c.CallOptions).ListTopics):len((*c.CallOptions).ListTopics)], opts...) 
it := &TopicIterator{} req = proto.Clone(req).(*pubsubpb.ListTopicsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) { - var resp *pubsubpb.ListTopicsResponse - req.PageToken = pageToken + resp := &pubsubpb.ListTopicsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -319,7 +527,7 @@ func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopi } it.Response = resp - return resp.Topics, resp.NextPageToken, nil + return resp.GetTopics(), resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) @@ -329,25 +537,29 @@ func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopi it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + return it } -// ListTopicSubscriptions lists the names of the subscriptions on this topic. -func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator { +func (c *publisherGRPCClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListTopicSubscriptions[0:len(c.CallOptions.ListTopicSubscriptions):len(c.CallOptions.ListTopicSubscriptions)], opts...) + opts = append((*c.CallOptions).ListTopicSubscriptions[0:len((*c.CallOptions).ListTopicSubscriptions):len((*c.CallOptions).ListTopicSubscriptions)], opts...) it := &StringIterator{} req = proto.Clone(req).(*pubsubpb.ListTopicSubscriptionsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { - var resp *pubsubpb.ListTopicSubscriptionsResponse - req.PageToken = pageToken + resp := &pubsubpb.ListTopicSubscriptionsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -360,7 +572,7 @@ func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsu } it.Response = resp - return resp.Subscriptions, resp.NextPageToken, nil + return resp.GetSubscriptions(), resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) @@ -370,30 +582,29 @@ func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsu it.items = append(it.items, items...) 
return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + return it } -// ListTopicSnapshots lists the names of the snapshots on this topic. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow -// you to manage message acknowledgments in bulk. That is, you can set the -// acknowledgment state of messages in an existing subscription to the state -// captured by a snapshot. -func (c *PublisherClient) ListTopicSnapshots(ctx context.Context, req *pubsubpb.ListTopicSnapshotsRequest, opts ...gax.CallOption) *StringIterator { +func (c *publisherGRPCClient) ListTopicSnapshots(ctx context.Context, req *pubsubpb.ListTopicSnapshotsRequest, opts ...gax.CallOption) *StringIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListTopicSnapshots[0:len(c.CallOptions.ListTopicSnapshots):len(c.CallOptions.ListTopicSnapshots)], opts...) + opts = append((*c.CallOptions).ListTopicSnapshots[0:len((*c.CallOptions).ListTopicSnapshots):len((*c.CallOptions).ListTopicSnapshots)], opts...) it := &StringIterator{} req = proto.Clone(req).(*pubsubpb.ListTopicSnapshotsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { - var resp *pubsubpb.ListTopicSnapshotsResponse - req.PageToken = pageToken + resp := &pubsubpb.ListTopicSnapshotsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -406,7 +617,7 @@ func (c *PublisherClient) ListTopicSnapshots(ctx context.Context, req *pubsubpb. } it.Response = resp - return resp.Snapshots, resp.NextPageToken, nil + return resp.GetSnapshots(), resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) @@ -416,21 +627,24 @@ func (c *PublisherClient) ListTopicSnapshots(ctx context.Context, req *pubsubpb. it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + return it } -// DeleteTopic deletes the topic with the given name. Returns NOT_FOUND if the topic -// does not exist. After a topic is deleted, a new topic may be created with -// the same name; this is an entirely new topic with none of the old -// configuration or subscriptions. Existing subscriptions to this topic are -// not deleted, but their topic field is set to _deleted-topic_. 
-func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error { +func (c *publisherGRPCClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...) + opts = append((*c.CallOptions).DeleteTopic[0:len((*c.CallOptions).DeleteTopic):len((*c.CallOptions).DeleteTopic)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.publisherClient.DeleteTopic(ctx, req, settings.GRPC...) @@ -439,6 +653,79 @@ func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteT return err } +func (c *publisherGRPCClient) DetachSubscription(ctx context.Context, req *pubsubpb.DetachSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.DetachSubscriptionResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).DetachSubscription[0:len((*c.CallOptions).DetachSubscription):len((*c.CallOptions).DetachSubscription)], opts...) + var resp *pubsubpb.DetachSubscriptionResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.DetachSubscription(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *publisherGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *publisherGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *publisherGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + // StringIterator manages a stream of string. type StringIterator struct { items []string diff --git a/vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go rename to vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go index 3d8955171057c..84fd669008c94 100644 --- a/vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/pubsub/v1/pubsub.proto -package pubsub +package pubsubpb import ( context "context" @@ -1778,6 +1778,7 @@ type PushConfig struct { // authenticated push. // // Types that are assignable to AuthenticationMethod: + // // *PushConfig_OidcToken_ AuthenticationMethod isPushConfig_AuthenticationMethod `protobuf_oneof:"authentication_method"` } @@ -2905,12 +2906,14 @@ type CreateSnapshotRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Required. The subscription whose backlog the snapshot retains. // Specifically, the created snapshot is guaranteed to retain: - // (a) The existing backlog on the subscription. More precisely, this is - // defined as the messages in the subscription's backlog that are - // unacknowledged upon the successful completion of the - // `CreateSnapshot` request; as well as: - // (b) Any messages published to the subscription's topic following the - // successful completion of the CreateSnapshot request. + // + // (a) The existing backlog on the subscription. More precisely, this is + // defined as the messages in the subscription's backlog that are + // unacknowledged upon the successful completion of the + // `CreateSnapshot` request; as well as: + // (b) Any messages published to the subscription's topic following the + // successful completion of the CreateSnapshot request. + // // Format is `projects/{project}/subscriptions/{sub}`. Subscription string `protobuf:"bytes,2,opt,name=subscription,proto3" json:"subscription,omitempty"` // See Creating and @@ -3358,6 +3361,7 @@ type SeekRequest struct { // Required. The subscription to affect. 
Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` // Types that are assignable to Target: + // // *SeekRequest_Time // *SeekRequest_Snapshot Target isSeekRequest_Target `protobuf_oneof:"target"` diff --git a/vendor/google.golang.org/genproto/googleapis/pubsub/v1/schema.pb.go b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/schema.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/pubsub/v1/schema.pb.go rename to vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/schema.pb.go index 502989b1dfa75..814159324a5aa 100644 --- a/vendor/google.golang.org/genproto/googleapis/pubsub/v1/schema.pb.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/schema.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/pubsub/v1/schema.proto -package pubsub +package pubsubpb import ( context "context" @@ -708,6 +708,7 @@ type ValidateMessageRequest struct { // Format is `projects/{project-id}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Types that are assignable to SchemaSpec: + // // *ValidateMessageRequest_Name // *ValidateMessageRequest_Schema SchemaSpec isValidateMessageRequest_SchemaSpec `protobuf_oneof:"schema_spec"` diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go new file mode 100644 index 0000000000000..b905d25d4a5a1 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go @@ -0,0 +1,491 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package pubsub + +import ( + "context" + "fmt" + "math" + "net/url" + + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + iampb "google.golang.org/genproto/googleapis/iam/v1" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" +) + +var newSchemaClientHook clientHook + +// SchemaCallOptions contains the retry settings for each method of SchemaClient. 
+type SchemaCallOptions struct { + CreateSchema []gax.CallOption + GetSchema []gax.CallOption + ListSchemas []gax.CallOption + DeleteSchema []gax.CallOption + ValidateSchema []gax.CallOption + ValidateMessage []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption +} + +func defaultSchemaGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("pubsub.googleapis.com:443"), + internaloption.WithDefaultMTLSEndpoint("pubsub.mtls.googleapis.com:443"), + internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultSchemaCallOptions() *SchemaCallOptions { + return &SchemaCallOptions{ + CreateSchema: []gax.CallOption{}, + GetSchema: []gax.CallOption{}, + ListSchemas: []gax.CallOption{}, + DeleteSchema: []gax.CallOption{}, + ValidateSchema: []gax.CallOption{}, + ValidateMessage: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + } +} + +// internalSchemaClient is an interface that defines the methods available from Cloud Pub/Sub API. +type internalSchemaClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateSchema(context.Context, *pubsubpb.CreateSchemaRequest, ...gax.CallOption) (*pubsubpb.Schema, error) + GetSchema(context.Context, *pubsubpb.GetSchemaRequest, ...gax.CallOption) (*pubsubpb.Schema, error) + ListSchemas(context.Context, *pubsubpb.ListSchemasRequest, ...gax.CallOption) *SchemaIterator + DeleteSchema(context.Context, *pubsubpb.DeleteSchemaRequest, ...gax.CallOption) error + ValidateSchema(context.Context, *pubsubpb.ValidateSchemaRequest, ...gax.CallOption) (*pubsubpb.ValidateSchemaResponse, error) + ValidateMessage(context.Context, *pubsubpb.ValidateMessageRequest, ...gax.CallOption) (*pubsubpb.ValidateMessageResponse, error) + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) +} + +// SchemaClient is a client for interacting with Cloud Pub/Sub API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// Service for doing schema-related operations. +type SchemaClient struct { + // The internal transport-dependent client. + internalClient internalSchemaClient + + // The call options for this service. + CallOptions *SchemaCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *SchemaClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *SchemaClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. 
+// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *SchemaClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateSchema creates a schema. +func (c *SchemaClient) CreateSchema(ctx context.Context, req *pubsubpb.CreateSchemaRequest, opts ...gax.CallOption) (*pubsubpb.Schema, error) { + return c.internalClient.CreateSchema(ctx, req, opts...) +} + +// GetSchema gets a schema. +func (c *SchemaClient) GetSchema(ctx context.Context, req *pubsubpb.GetSchemaRequest, opts ...gax.CallOption) (*pubsubpb.Schema, error) { + return c.internalClient.GetSchema(ctx, req, opts...) +} + +// ListSchemas lists schemas in a project. +func (c *SchemaClient) ListSchemas(ctx context.Context, req *pubsubpb.ListSchemasRequest, opts ...gax.CallOption) *SchemaIterator { + return c.internalClient.ListSchemas(ctx, req, opts...) +} + +// DeleteSchema deletes a schema. +func (c *SchemaClient) DeleteSchema(ctx context.Context, req *pubsubpb.DeleteSchemaRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteSchema(ctx, req, opts...) +} + +// ValidateSchema validates a schema. +func (c *SchemaClient) ValidateSchema(ctx context.Context, req *pubsubpb.ValidateSchemaRequest, opts ...gax.CallOption) (*pubsubpb.ValidateSchemaResponse, error) { + return c.internalClient.ValidateSchema(ctx, req, opts...) +} + +// ValidateMessage validates a message against a schema. +func (c *SchemaClient) ValidateMessage(ctx context.Context, req *pubsubpb.ValidateMessageRequest, opts ...gax.CallOption) (*pubsubpb.ValidateMessageResponse, error) { + return c.internalClient.ValidateMessage(ctx, req, opts...) +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *SchemaClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *SchemaClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) +} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. +func (c *SchemaClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// schemaGRPCClient is a client for interacting with Cloud Pub/Sub API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type schemaGRPCClient struct { + // Connection pool of gRPC connections to the service. 
+ connPool gtransport.ConnPool + + // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE + disableDeadlines bool + + // Points back to the CallOptions field of the containing SchemaClient + CallOptions **SchemaCallOptions + + // The gRPC API client. + schemaClient pubsubpb.SchemaServiceClient + + iamPolicyClient iampb.IAMPolicyClient + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewSchemaClient creates a new schema service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// Service for doing schema-related operations. +func NewSchemaClient(ctx context.Context, opts ...option.ClientOption) (*SchemaClient, error) { + clientOpts := defaultSchemaGRPCClientOptions() + if newSchemaClientHook != nil { + hookOpts, err := newSchemaClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + disableDeadlines, err := checkDisableDeadlines() + if err != nil { + return nil, err + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := SchemaClient{CallOptions: defaultSchemaCallOptions()} + + c := &schemaGRPCClient{ + connPool: connPool, + disableDeadlines: disableDeadlines, + schemaClient: pubsubpb.NewSchemaServiceClient(connPool), + CallOptions: &client.CallOptions, + iamPolicyClient: iampb.NewIAMPolicyClient(connPool), + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *schemaGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *schemaGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *schemaGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *schemaGRPCClient) CreateSchema(ctx context.Context, req *pubsubpb.CreateSchemaRequest, opts ...gax.CallOption) (*pubsubpb.Schema, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CreateSchema[0:len((*c.CallOptions).CreateSchema):len((*c.CallOptions).CreateSchema)], opts...) + var resp *pubsubpb.Schema + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.schemaClient.CreateSchema(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *schemaGRPCClient) GetSchema(ctx context.Context, req *pubsubpb.GetSchemaRequest, opts ...gax.CallOption) (*pubsubpb.Schema, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetSchema[0:len((*c.CallOptions).GetSchema):len((*c.CallOptions).GetSchema)], opts...) + var resp *pubsubpb.Schema + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.schemaClient.GetSchema(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *schemaGRPCClient) ListSchemas(ctx context.Context, req *pubsubpb.ListSchemasRequest, opts ...gax.CallOption) *SchemaIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListSchemas[0:len((*c.CallOptions).ListSchemas):len((*c.CallOptions).ListSchemas)], opts...) + it := &SchemaIterator{} + req = proto.Clone(req).(*pubsubpb.ListSchemasRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Schema, string, error) { + resp := &pubsubpb.ListSchemasResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.schemaClient.ListSchemas(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetSchemas(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *schemaGRPCClient) DeleteSchema(ctx context.Context, req *pubsubpb.DeleteSchemaRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).DeleteSchema[0:len((*c.CallOptions).DeleteSchema):len((*c.CallOptions).DeleteSchema)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.schemaClient.DeleteSchema(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *schemaGRPCClient) ValidateSchema(ctx context.Context, req *pubsubpb.ValidateSchemaRequest, opts ...gax.CallOption) (*pubsubpb.ValidateSchemaResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ValidateSchema[0:len((*c.CallOptions).ValidateSchema):len((*c.CallOptions).ValidateSchema)], opts...) 
+ var resp *pubsubpb.ValidateSchemaResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.schemaClient.ValidateSchema(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *schemaGRPCClient) ValidateMessage(ctx context.Context, req *pubsubpb.ValidateMessageRequest, opts ...gax.CallOption) (*pubsubpb.ValidateMessageResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ValidateMessage[0:len((*c.CallOptions).ValidateMessage):len((*c.CallOptions).ValidateMessage)], opts...) + var resp *pubsubpb.ValidateMessageResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.schemaClient.ValidateMessage(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *schemaGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *schemaGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *schemaGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SchemaIterator manages a stream of *pubsubpb.Schema. 
+type SchemaIterator struct { + items []*pubsubpb.Schema + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Schema, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SchemaIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *SchemaIterator) Next() (*pubsubpb.Schema, error) { + var item *pubsubpb.Schema + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SchemaIterator) bufLen() int { + return len(it.items) +} + +func (it *SchemaIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go index 5bbf8908059b3..e5ea42364a7ab 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,17 +23,21 @@ import ( "net/url" "time" - "github.com/golang/protobuf/proto" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" + iampb "google.golang.org/genproto/googleapis/iam/v1" pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" ) +var newSubscriberClientHook clientHook + // SubscriberCallOptions contains the retry settings for each method of SubscriberClient. 
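For orientation, here is a minimal, illustrative sketch (not part of the vendored diff) of how the schema surface added above is typically driven: create a SchemaClient, page through ListSchemas with the SchemaIterator until iterator.Done, then fetch a single schema by name. The project and schema names are placeholders, application default credentials are assumed, and only calls defined in the generated client above are used.

package main

import (
	"context"
	"fmt"
	"log"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	"google.golang.org/api/iterator"
	pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
)

func main() {
	ctx := context.Background()

	// NewSchemaClient dials the service using the default gRPC options shown above.
	sc, err := pubsub.NewSchemaClient(ctx)
	if err != nil {
		log.Fatalf("NewSchemaClient: %v", err)
	}
	defer sc.Close()

	// Walk every schema in the (placeholder) project via the SchemaIterator.
	it := sc.ListSchemas(ctx, &pubsubpb.ListSchemasRequest{Parent: "projects/my-project"})
	for {
		s, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatalf("ListSchemas: %v", err)
		}
		fmt.Println(s.GetName())
	}

	// Fetch one schema by its fully qualified (placeholder) name.
	schema, err := sc.GetSchema(ctx, &pubsubpb.GetSchemaRequest{Name: "projects/my-project/schemas/my-schema"})
	if err != nil {
		log.Fatalf("GetSchema: %v", err)
	}
	fmt.Println(schema.GetDefinition())
}

The loop follows the Next/iterator.Done pattern documented on SchemaIterator above; PageInfo can be used instead when explicit page sizes or tokens are needed.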
type SubscriberCallOptions struct { CreateSubscription []gax.CallOption @@ -52,13 +56,18 @@ type SubscriberCallOptions struct { UpdateSnapshot []gax.CallOption DeleteSnapshot []gax.CallOption Seek []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption } -func defaultSubscriberClientOptions() []option.ClientOption { +func defaultSubscriberGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ - option.WithEndpoint("pubsub.googleapis.com:443"), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), - option.WithScopes(DefaultAuthScopes()...), + internaloption.WithDefaultEndpoint("pubsub.googleapis.com:443"), + internaloption.WithDefaultMTLSEndpoint("pubsub.mtls.googleapis.com:443"), + internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -260,86 +269,356 @@ func defaultSubscriberCallOptions() *SubscriberCallOptions { }) }), }, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, } } +// internalSubscriberClient is an interface that defines the methods available from Cloud Pub/Sub API. +type internalSubscriberClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateSubscription(context.Context, *pubsubpb.Subscription, ...gax.CallOption) (*pubsubpb.Subscription, error) + GetSubscription(context.Context, *pubsubpb.GetSubscriptionRequest, ...gax.CallOption) (*pubsubpb.Subscription, error) + UpdateSubscription(context.Context, *pubsubpb.UpdateSubscriptionRequest, ...gax.CallOption) (*pubsubpb.Subscription, error) + ListSubscriptions(context.Context, *pubsubpb.ListSubscriptionsRequest, ...gax.CallOption) *SubscriptionIterator + DeleteSubscription(context.Context, *pubsubpb.DeleteSubscriptionRequest, ...gax.CallOption) error + ModifyAckDeadline(context.Context, *pubsubpb.ModifyAckDeadlineRequest, ...gax.CallOption) error + Acknowledge(context.Context, *pubsubpb.AcknowledgeRequest, ...gax.CallOption) error + Pull(context.Context, *pubsubpb.PullRequest, ...gax.CallOption) (*pubsubpb.PullResponse, error) + StreamingPull(context.Context, ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) + ModifyPushConfig(context.Context, *pubsubpb.ModifyPushConfigRequest, ...gax.CallOption) error + GetSnapshot(context.Context, *pubsubpb.GetSnapshotRequest, ...gax.CallOption) (*pubsubpb.Snapshot, error) + ListSnapshots(context.Context, *pubsubpb.ListSnapshotsRequest, ...gax.CallOption) *SnapshotIterator + CreateSnapshot(context.Context, *pubsubpb.CreateSnapshotRequest, ...gax.CallOption) (*pubsubpb.Snapshot, error) + UpdateSnapshot(context.Context, *pubsubpb.UpdateSnapshotRequest, ...gax.CallOption) (*pubsubpb.Snapshot, error) + DeleteSnapshot(context.Context, *pubsubpb.DeleteSnapshotRequest, ...gax.CallOption) error + Seek(context.Context, *pubsubpb.SeekRequest, ...gax.CallOption) (*pubsubpb.SeekResponse, error) + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) +} + // SubscriberClient is a client for 
interacting with Cloud Pub/Sub API. -// // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The service that an application uses to manipulate subscriptions and to +// consume messages from a subscription via the Pull method or by +// establishing a bi-directional stream using the StreamingPull method. type SubscriberClient struct { + // The internal transport-dependent client. + internalClient internalSubscriberClient + + // The call options for this service. + CallOptions *SubscriberCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *SubscriberClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *SubscriberClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *SubscriberClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateSubscription creates a subscription to a given topic. See the [resource name rules] +// (https://cloud.google.com/pubsub/docs/admin#resource_names (at https://cloud.google.com/pubsub/docs/admin#resource_names)). +// If the subscription already exists, returns ALREADY_EXISTS. +// If the corresponding topic doesn’t exist, returns NOT_FOUND. +// +// If the name is not provided in the request, the server will assign a random +// name for this subscription on the same project as the topic, conforming +// to the [resource name format] +// (https://cloud.google.com/pubsub/docs/admin#resource_names (at https://cloud.google.com/pubsub/docs/admin#resource_names)). The generated +// name is populated in the returned Subscription object. Note that for REST +// API requests, you must specify a name in the request. +func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + return c.internalClient.CreateSubscription(ctx, req, opts...) +} + +// GetSubscription gets the configuration details of a subscription. +func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + return c.internalClient.GetSubscription(ctx, req, opts...) +} + +// UpdateSubscription updates an existing subscription. Note that certain properties of a +// subscription, such as its topic, are not modifiable. +func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + return c.internalClient.UpdateSubscription(ctx, req, opts...) +} + +// ListSubscriptions lists matching subscriptions. +func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator { + return c.internalClient.ListSubscriptions(ctx, req, opts...) +} + +// DeleteSubscription deletes an existing subscription. All messages retained in the subscription +// are immediately dropped. 
Calls to Pull after deletion will return +// NOT_FOUND. After a subscription is deleted, a new one may be created with +// the same name, but the new one has no association with the old +// subscription or its topic unless the same topic is specified. +func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteSubscription(ctx, req, opts...) +} + +// ModifyAckDeadline modifies the ack deadline for a specific message. This method is useful +// to indicate that more time is needed to process a message by the +// subscriber, or to make the message available for redelivery if the +// processing was interrupted. Note that this does not modify the +// subscription-level ackDeadlineSeconds used for subsequent messages. +func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error { + return c.internalClient.ModifyAckDeadline(ctx, req, opts...) +} + +// Acknowledge acknowledges the messages associated with the ack_ids in the +// AcknowledgeRequest. The Pub/Sub system can remove the relevant messages +// from the subscription. +// +// Acknowledging a message whose ack deadline has expired may succeed, +// but such a message may be redelivered later. Acknowledging a message more +// than once will not result in an error. +func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error { + return c.internalClient.Acknowledge(ctx, req, opts...) +} + +// Pull pulls messages from the server. The server may return UNAVAILABLE if +// there are too many concurrent pull requests pending for the given +// subscription. +func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) { + return c.internalClient.Pull(ctx, req, opts...) +} + +// StreamingPull establishes a stream with the server, which sends messages down to the +// client. The client streams acknowledgements and ack deadline modifications +// back to the server. The server will close the stream and return the status +// on any error. The server may close the stream with status UNAVAILABLE to +// reassign server-side resources, in which case, the client should +// re-establish the stream. Flow control can be achieved by configuring the +// underlying RPC channel. +func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) { + return c.internalClient.StreamingPull(ctx, opts...) +} + +// ModifyPushConfig modifies the PushConfig for a specified subscription. +// +// This may be used to change a push subscription to a pull one (signified by +// an empty PushConfig) or vice versa, or change the endpoint URL and other +// attributes of a push subscription. Messages will accumulate for delivery +// continuously through the call regardless of changes to the PushConfig. +func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error { + return c.internalClient.ModifyPushConfig(ctx, req, opts...) +} + +// GetSnapshot gets the configuration details of a snapshot. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow you to manage message acknowledgments in bulk. 
That +// is, you can set the acknowledgment state of messages in an existing +// subscription to the state captured by a snapshot. +func (c *SubscriberClient) GetSnapshot(ctx context.Context, req *pubsubpb.GetSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + return c.internalClient.GetSnapshot(ctx, req, opts...) +} + +// ListSnapshots lists the existing snapshots. Snapshots are used in Seek (at https://cloud.google.com/pubsub/docs/replay-overview) operations, which +// allow you to manage message acknowledgments in bulk. That is, you can set +// the acknowledgment state of messages in an existing subscription to the +// state captured by a snapshot. +func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator { + return c.internalClient.ListSnapshots(ctx, req, opts...) +} + +// CreateSnapshot creates a snapshot from the requested subscription. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) operations, +// which allow you to manage message acknowledgments in bulk. That is, you can +// set the acknowledgment state of messages in an existing subscription to the +// state captured by a snapshot. +// If the snapshot already exists, returns ALREADY_EXISTS. +// If the requested subscription doesn’t exist, returns NOT_FOUND. +// If the backlog in the subscription is too old – and the resulting snapshot +// would expire in less than 1 hour – then FAILED_PRECONDITION is returned. +// See also the Snapshot.expire_time field. If the name is not provided in +// the request, the server will assign a random +// name for this snapshot on the same project as the subscription, conforming +// to the [resource name format] +// (https://cloud.google.com/pubsub/docs/admin#resource_names (at https://cloud.google.com/pubsub/docs/admin#resource_names)). The +// generated name is populated in the returned Snapshot object. Note that for +// REST API requests, you must specify a name in the request. +func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + return c.internalClient.CreateSnapshot(ctx, req, opts...) +} + +// UpdateSnapshot updates an existing snapshot. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow +// you to manage message acknowledgments in bulk. That is, you can set the +// acknowledgment state of messages in an existing subscription to the state +// captured by a snapshot. +func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + return c.internalClient.UpdateSnapshot(ctx, req, opts...) +} + +// DeleteSnapshot removes an existing snapshot. Snapshots are used in [Seek] +// (https://cloud.google.com/pubsub/docs/replay-overview (at https://cloud.google.com/pubsub/docs/replay-overview)) operations, which +// allow you to manage message acknowledgments in bulk. That is, you can set +// the acknowledgment state of messages in an existing subscription to the +// state captured by a snapshot. +// When the snapshot is deleted, all messages retained in the snapshot +// are immediately dropped. After a snapshot is deleted, a new one may be +// created with the same name, but the new one has no association with the old +// snapshot or its subscription, unless the same subscription is specified. 
+func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteSnapshot(ctx, req, opts...) +} + +// Seek seeks an existing subscription to a point in time or to a given snapshot, +// whichever is provided in the request. Snapshots are used in [Seek] +// (https://cloud.google.com/pubsub/docs/replay-overview (at https://cloud.google.com/pubsub/docs/replay-overview)) operations, which +// allow you to manage message acknowledgments in bulk. That is, you can set +// the acknowledgment state of messages in an existing subscription to the +// state captured by a snapshot. Note that both the subscription and the +// snapshot must be on the same topic. +func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) { + return c.internalClient.Seek(ctx, req, opts...) +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *SubscriberClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *SubscriberClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) +} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. +func (c *SubscriberClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// subscriberGRPCClient is a client for interacting with Cloud Pub/Sub API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type subscriberGRPCClient struct { // Connection pool of gRPC connections to the service. connPool gtransport.ConnPool + // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE + disableDeadlines bool + + // Points back to the CallOptions field of the containing SubscriberClient + CallOptions **SubscriberCallOptions + // The gRPC API client. subscriberClient pubsubpb.SubscriberClient - // The call options for this service. - CallOptions *SubscriberCallOptions + iamPolicyClient iampb.IAMPolicyClient // The x-goog-* metadata to be sent with each request. xGoogMetadata metadata.MD } -// NewSubscriberClient creates a new subscriber client. +// NewSubscriberClient creates a new subscriber client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. 
// // The service that an application uses to manipulate subscriptions and to // consume messages from a subscription via the Pull method or by // establishing a bi-directional stream using the StreamingPull method. func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) { - connPool, err := gtransport.DialPool(ctx, append(defaultSubscriberClientOptions(), opts...)...) + clientOpts := defaultSubscriberGRPCClientOptions() + if newSubscriberClientHook != nil { + hookOpts, err := newSubscriberClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + disableDeadlines, err := checkDisableDeadlines() if err != nil { return nil, err } - c := &SubscriberClient{ - connPool: connPool, - CallOptions: defaultSubscriberCallOptions(), + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := SubscriberClient{CallOptions: defaultSubscriberCallOptions()} + + c := &subscriberGRPCClient{ + connPool: connPool, + disableDeadlines: disableDeadlines, subscriberClient: pubsubpb.NewSubscriberClient(connPool), + CallOptions: &client.CallOptions, + iamPolicyClient: iampb.NewIAMPolicyClient(connPool), } - c.SetGoogleClientInfo() + c.setGoogleClientInfo() - return c, nil + client.internalClient = c + + return &client, nil } // Connection returns a connection to the API service. // -// Deprecated. -func (c *SubscriberClient) Connection() *grpc.ClientConn { +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *subscriberGRPCClient) Connection() *grpc.ClientConn { return c.connPool.Conn() } -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *SubscriberClient) Close() error { - return c.connPool.Close() -} - -// SetGoogleClientInfo sets the name and version of the application in +// setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) { +func (c *subscriberGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } -// CreateSubscription creates a subscription to a given topic. See the -// -// resource name rules (at https://cloud.google.com/pubsub/docs/admin#resource_names). -// If the subscription already exists, returns ALREADY_EXISTS. -// If the corresponding topic doesn’t exist, returns NOT_FOUND. -// -// If the name is not provided in the request, the server will assign a random -// name for this subscription on the same project as the topic, conforming -// to the -// resource name -// format (at https://cloud.google.com/pubsub/docs/admin#resource_names). The -// generated name is populated in the returned Subscription object. Note that -// for REST API requests, you must specify a name in the request. -func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { +// Close closes the connection to the API service. 
The user should invoke this when +// the client is no longer required. +func (c *subscriberGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *subscriberGRPCClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateSubscription[0:len(c.CallOptions.CreateSubscription):len(c.CallOptions.CreateSubscription)], opts...) + opts = append((*c.CallOptions).CreateSubscription[0:len((*c.CallOptions).CreateSubscription):len((*c.CallOptions).CreateSubscription)], opts...) var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -352,11 +631,16 @@ func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb return resp, nil } -// GetSubscription gets the configuration details of a subscription. -func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { +func (c *subscriberGRPCClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetSubscription[0:len(c.CallOptions.GetSubscription):len(c.CallOptions.GetSubscription)], opts...) + opts = append((*c.CallOptions).GetSubscription[0:len((*c.CallOptions).GetSubscription):len((*c.CallOptions).GetSubscription)], opts...) var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -369,12 +653,16 @@ func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.Ge return resp, nil } -// UpdateSubscription updates an existing subscription. Note that certain properties of a -// subscription, such as its topic, are not modifiable. -func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { +func (c *subscriberGRPCClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription.name", url.QueryEscape(req.GetSubscription().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...) + opts = append((*c.CallOptions).UpdateSubscription[0:len((*c.CallOptions).UpdateSubscription):len((*c.CallOptions).UpdateSubscription)], opts...) 
var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -387,19 +675,21 @@ func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb return resp, nil } -// ListSubscriptions lists matching subscriptions. -func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator { +func (c *subscriberGRPCClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListSubscriptions[0:len(c.CallOptions.ListSubscriptions):len(c.CallOptions.ListSubscriptions)], opts...) + opts = append((*c.CallOptions).ListSubscriptions[0:len((*c.CallOptions).ListSubscriptions):len((*c.CallOptions).ListSubscriptions)], opts...) it := &SubscriptionIterator{} req = proto.Clone(req).(*pubsubpb.ListSubscriptionsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) { - var resp *pubsubpb.ListSubscriptionsResponse - req.PageToken = pageToken + resp := &pubsubpb.ListSubscriptionsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -412,7 +702,7 @@ func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb. } it.Response = resp - return resp.Subscriptions, resp.NextPageToken, nil + return resp.GetSubscriptions(), resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) @@ -422,21 +712,24 @@ func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb. it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + return it } -// DeleteSubscription deletes an existing subscription. All messages retained in the subscription -// are immediately dropped. Calls to Pull after deletion will return -// NOT_FOUND. After a subscription is deleted, a new one may be created with -// the same name, but the new one has no association with the old -// subscription or its topic unless the same topic is specified. 
-func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error { +func (c *subscriberGRPCClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteSubscription[0:len(c.CallOptions.DeleteSubscription):len(c.CallOptions.DeleteSubscription)], opts...) + opts = append((*c.CallOptions).DeleteSubscription[0:len((*c.CallOptions).DeleteSubscription):len((*c.CallOptions).DeleteSubscription)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.DeleteSubscription(ctx, req, settings.GRPC...) @@ -445,15 +738,16 @@ func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb return err } -// ModifyAckDeadline modifies the ack deadline for a specific message. This method is useful -// to indicate that more time is needed to process a message by the -// subscriber, or to make the message available for redelivery if the -// processing was interrupted. Note that this does not modify the -// subscription-level ackDeadlineSeconds used for subsequent messages. -func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error { +func (c *subscriberGRPCClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...) + opts = append((*c.CallOptions).ModifyAckDeadline[0:len((*c.CallOptions).ModifyAckDeadline):len((*c.CallOptions).ModifyAckDeadline)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.ModifyAckDeadline(ctx, req, settings.GRPC...) @@ -462,17 +756,16 @@ func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb. return err } -// Acknowledge acknowledges the messages associated with the ack_ids in the -// AcknowledgeRequest. The Pub/Sub system can remove the relevant messages -// from the subscription. -// -// Acknowledging a message whose ack deadline has expired may succeed, -// but such a message may be redelivered later. Acknowledging a message more -// than once will not result in an error. 
-func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error { +func (c *subscriberGRPCClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.Acknowledge[0:len(c.CallOptions.Acknowledge):len(c.CallOptions.Acknowledge)], opts...) + opts = append((*c.CallOptions).Acknowledge[0:len((*c.CallOptions).Acknowledge):len((*c.CallOptions).Acknowledge)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.Acknowledge(ctx, req, settings.GRPC...) @@ -481,13 +774,16 @@ func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.Acknow return err } -// Pull pulls messages from the server. The server may return UNAVAILABLE if -// there are too many concurrent pull requests pending for the given -// subscription. -func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) { +func (c *subscriberGRPCClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.Pull[0:len(c.CallOptions.Pull):len(c.CallOptions.Pull)], opts...) + opts = append((*c.CallOptions).Pull[0:len((*c.CallOptions).Pull):len((*c.CallOptions).Pull)], opts...) var resp *pubsubpb.PullResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -500,17 +796,10 @@ func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, return resp, nil } -// StreamingPull establishes a stream with the server, which sends messages down to the -// client. The client streams acknowledgements and ack deadline modifications -// back to the server. The server will close the stream and return the status -// on any error. The server may close the stream with status UNAVAILABLE to -// reassign server-side resources, in which case, the client should -// re-establish the stream. Flow control can be achieved by configuring the -// underlying RPC channel. -func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) { +func (c *subscriberGRPCClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) { ctx = insertMetadata(ctx, c.xGoogMetadata) - opts = append(c.CallOptions.StreamingPull[0:len(c.CallOptions.StreamingPull):len(c.CallOptions.StreamingPull)], opts...) var resp pubsubpb.Subscriber_StreamingPullClient + opts = append((*c.CallOptions).StreamingPull[0:len((*c.CallOptions).StreamingPull):len((*c.CallOptions).StreamingPull)], opts...) 
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.subscriberClient.StreamingPull(ctx, settings.GRPC...) @@ -522,16 +811,16 @@ func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOp return resp, nil } -// ModifyPushConfig modifies the PushConfig for a specified subscription. -// -// This may be used to change a push subscription to a pull one (signified by -// an empty PushConfig) or vice versa, or change the endpoint URL and other -// attributes of a push subscription. Messages will accumulate for delivery -// continuously through the call regardless of changes to the PushConfig. -func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error { +func (c *subscriberGRPCClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...) + opts = append((*c.CallOptions).ModifyPushConfig[0:len((*c.CallOptions).ModifyPushConfig):len((*c.CallOptions).ModifyPushConfig)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.ModifyPushConfig(ctx, req, settings.GRPC...) @@ -540,15 +829,16 @@ func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.M return err } -// GetSnapshot gets the configuration details of a snapshot. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow you to manage message acknowledgments in bulk. That -// is, you can set the acknowledgment state of messages in an existing -// subscription to the state captured by a snapshot. -func (c *SubscriberClient) GetSnapshot(ctx context.Context, req *pubsubpb.GetSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { +func (c *subscriberGRPCClient) GetSnapshot(ctx context.Context, req *pubsubpb.GetSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot", url.QueryEscape(req.GetSnapshot()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetSnapshot[0:len(c.CallOptions.GetSnapshot):len(c.CallOptions.GetSnapshot)], opts...) + opts = append((*c.CallOptions).GetSnapshot[0:len((*c.CallOptions).GetSnapshot):len((*c.CallOptions).GetSnapshot)], opts...) var resp *pubsubpb.Snapshot err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -561,24 +851,21 @@ func (c *SubscriberClient) GetSnapshot(ctx context.Context, req *pubsubpb.GetSna return resp, nil } -// ListSnapshots lists the existing snapshots. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow -// you to manage message acknowledgments in bulk. 
That is, you can set the -// acknowledgment state of messages in an existing subscription to the state -// captured by a snapshot. -func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator { +func (c *subscriberGRPCClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListSnapshots[0:len(c.CallOptions.ListSnapshots):len(c.CallOptions.ListSnapshots)], opts...) + opts = append((*c.CallOptions).ListSnapshots[0:len((*c.CallOptions).ListSnapshots):len((*c.CallOptions).ListSnapshots)], opts...) it := &SnapshotIterator{} req = proto.Clone(req).(*pubsubpb.ListSnapshotsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Snapshot, string, error) { - var resp *pubsubpb.ListSnapshotsResponse - req.PageToken = pageToken + resp := &pubsubpb.ListSnapshotsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -591,7 +878,7 @@ func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.List } it.Response = resp - return resp.Snapshots, resp.NextPageToken, nil + return resp.GetSnapshots(), resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) @@ -601,36 +888,24 @@ func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.List it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + return it } -// CreateSnapshot creates a snapshot from the requested subscription. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow -// you to manage message acknowledgments in bulk. That is, you can set the -// acknowledgment state of messages in an existing subscription to the state -// captured by a snapshot. -// -// -// If the snapshot already exists, returns ALREADY_EXISTS. -// If the requested subscription doesn’t exist, returns NOT_FOUND. -// If the backlog in the subscription is too old – and the resulting snapshot -// would expire in less than 1 hour – then FAILED_PRECONDITION is returned. -// See also the Snapshot.expire_time field. If the name is not provided in -// the request, the server will assign a random -// name for this snapshot on the same project as the subscription, conforming -// to the -// resource name -// format (at https://cloud.google.com/pubsub/docs/admin#resource_names). The -// generated name is populated in the returned Snapshot object. Note that for -// REST API requests, you must specify a name in the request. 
-func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { +func (c *subscriberGRPCClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateSnapshot[0:len(c.CallOptions.CreateSnapshot):len(c.CallOptions.CreateSnapshot)], opts...) + opts = append((*c.CallOptions).CreateSnapshot[0:len((*c.CallOptions).CreateSnapshot):len((*c.CallOptions).CreateSnapshot)], opts...) var resp *pubsubpb.Snapshot err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -643,16 +918,16 @@ func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.Cre return resp, nil } -// UpdateSnapshot updates an existing snapshot. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow -// you to manage message acknowledgments in bulk. That is, you can set the -// acknowledgment state of messages in an existing subscription to the state -// captured by a snapshot. -func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { +func (c *subscriberGRPCClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot.name", url.QueryEscape(req.GetSnapshot().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateSnapshot[0:len(c.CallOptions.UpdateSnapshot):len(c.CallOptions.UpdateSnapshot)], opts...) + opts = append((*c.CallOptions).UpdateSnapshot[0:len((*c.CallOptions).UpdateSnapshot):len((*c.CallOptions).UpdateSnapshot)], opts...) var resp *pubsubpb.Snapshot err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -665,22 +940,16 @@ func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.Upd return resp, nil } -// DeleteSnapshot removes an existing snapshot. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow -// you to manage message acknowledgments in bulk. That is, you can set the -// acknowledgment state of messages in an existing subscription to the state -// captured by a snapshot. -// -// -// When the snapshot is deleted, all messages retained in the snapshot -// are immediately dropped. After a snapshot is deleted, a new one may be -// created with the same name, but the new one has no association with the old -// snapshot or its subscription, unless the same subscription is specified. 
-func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error { +func (c *subscriberGRPCClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot", url.QueryEscape(req.GetSnapshot()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteSnapshot[0:len(c.CallOptions.DeleteSnapshot):len(c.CallOptions.DeleteSnapshot)], opts...) + opts = append((*c.CallOptions).DeleteSnapshot[0:len((*c.CallOptions).DeleteSnapshot):len((*c.CallOptions).DeleteSnapshot)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.DeleteSnapshot(ctx, req, settings.GRPC...) @@ -689,18 +958,16 @@ func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.Del return err } -// Seek seeks an existing subscription to a point in time or to a given snapshot, -// whichever is provided in the request. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow -// you to manage message acknowledgments in bulk. That is, you can set the -// acknowledgment state of messages in an existing subscription to the state -// captured by a snapshot. Note that both the subscription and the snapshot -// must be on the same topic. -func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) { +func (c *subscriberGRPCClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.Seek[0:len(c.CallOptions.Seek):len(c.CallOptions.Seek)], opts...) + opts = append((*c.CallOptions).Seek[0:len((*c.CallOptions).Seek):len((*c.CallOptions).Seek)], opts...) var resp *pubsubpb.SeekResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -713,6 +980,57 @@ func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, return resp, nil } +func (c *subscriberGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *subscriberGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *subscriberGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + // SnapshotIterator manages a stream of *pubsubpb.Snapshot. type SnapshotIterator struct { items []*pubsubpb.Snapshot diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/version.go b/vendor/cloud.google.com/go/pubsub/apiv1/version.go new file mode 100644 index 0000000000000..7ba352b539a4f --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/version.go @@ -0,0 +1,23 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapicgen. DO NOT EDIT. + +package pubsub + +import "cloud.google.com/go/pubsub/internal" + +func init() { + versionClient = internal.Version +} diff --git a/vendor/cloud.google.com/go/pubsub/debug.go b/vendor/cloud.google.com/go/pubsub/debug.go index 977ae577f7e73..056bd6342d4f7 100644 --- a/vendor/cloud.google.com/go/pubsub/debug.go +++ b/vendor/cloud.google.com/go/pubsub/debug.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build psdebug // +build psdebug package pubsub diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go index a86fc3d4a5945..b7f5f94fce678 100644 --- a/vendor/cloud.google.com/go/pubsub/doc.go +++ b/vendor/cloud.google.com/go/pubsub/doc.go @@ -24,17 +24,16 @@ https://cloud.google.com/pubsub/docs See https://godoc.org/cloud.google.com/go for authentication, timeouts, connection pooling and similar aspects of this package. - -Publishing +# Publishing Google Cloud Pub/Sub messages are published to topics. Topics may be created using the pubsub package like so: - topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name") + topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name") Messages may then be published to a topic: - res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")}) + res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")}) Publish queues the message for publishing and returns immediately. When enough messages have accumulated, or enough time has elapsed, the batch of messages is @@ -46,82 +45,102 @@ blocks until the message has been sent to the service. The first time you call Publish on a topic, goroutines are started in the background. To clean up these goroutines, call Stop: - topic.Stop() - + topic.Stop() -Receiving +# Receiving To receive messages published to a topic, clients create subscriptions to the topic. There may be more than one subscription per topic; each message that is published to the topic will be delivered to all of its subscriptions. -Subsciptions may be created like so: +Subscriptions may be created like so: - sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name", - pubsub.SubscriptionConfig{Topic: topic}) + sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name", + pubsub.SubscriptionConfig{Topic: topic}) Messages are then consumed from a subscription via callback. - err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) { - log.Printf("Got message: %s", m.Data) - m.Ack() - }) - if err != nil { - // Handle error. - } + err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) { + log.Printf("Got message: %s", m.Data) + m.Ack() + }) + if err != nil { + // Handle error. + } The callback is invoked concurrently by multiple goroutines, maximizing throughput. To terminate a call to Receive, cancel its context. Once client code has processed the message, it must call Message.Ack or -message.Nack, otherwise the message will eventually be redelivered. If the -client cannot or doesn't want to process the message, it can call Message.Nack +Message.Nack; otherwise the message will eventually be redelivered. Ack/Nack +MUST be called within the Receive handler function, and not from a goroutine. +Otherwise, flow control (e.g. ReceiveSettings.MaxOutstandingMessages) will +not be respected, and messages can get orphaned when cancelling Receive. + +If the client cannot or doesn't want to process the message, it can call Message.Nack to speed redelivery. For more information and configuration options, see -"Deadlines" below. +"Ack Deadlines" below. -Note: It is possible for Messages to be redelivered, even if Message.Ack has +Note: It is possible for Messages to be redelivered even if Message.Ack has been called. Client code must be robust to multiple deliveries of messages. -Note: This uses pubsub's streaming pull feature. 
This feature properties that +Note: This uses pubsub's streaming pull feature. This feature has properties that may be surprising. Please take a look at https://cloud.google.com/pubsub/docs/pull#streamingpull for more details on how streaming pull behaves compared to the synchronous pull method. +# Streams Management + +Streams used for streaming pull are configured by setting sub.ReceiveSettings.NumGoroutines. +However, the total number of streams possible is capped by the gRPC connection pool setting. +By default, the number of connections in the pool is min(4,GOMAXPROCS). -Deadlines +If you have 4 or more CPU cores, the default setting allows a maximum of 400 streams which is still a good default for most cases. +If you want to have more open streams (such as for low CPU core machines), you should pass in the grpc option as described below: + + opts := []option.ClientOption{ + option.WithGRPCConnectionPool(8), + } + client, err := pubsub.NewClient(ctx, projID, opts...) + +# Ack Deadlines The default pubsub deadlines are suitable for most use cases, but may be overridden. This section describes the tradeoffs that should be considered when overriding the defaults. Behind the scenes, each message returned by the Pub/Sub server has an -associated lease, known as an "ACK deadline". Unless a message is -acknowledged within the ACK deadline, or the client requests that -the ACK deadline be extended, the message will become eligible for redelivery. +associated lease, known as an "ack deadline". Unless a message is +acknowledged within the ack deadline, or the client requests that +the ack deadline be extended, the message will become eligible for redelivery. As a convenience, the pubsub client will automatically extend deadlines until either: - * Message.Ack or Message.Nack is called, or - * The "MaxExtension" period elapses from the time the message is fetched from the server. - -ACK deadlines are extended periodically by the client. The initial ACK -deadline given to messages is 10s. The period between extensions, as well as the -length of the extension, automatically adjust depending on the time it takes to ack -messages, up to 10m. This has the effect that subscribers that process messages -quickly have their message ack deadlines extended for a short amount, whereas + - Message.Ack or Message.Nack is called, or + - The "MaxExtension" duration elapses from the time the message is fetched from + the server. This defaults to 60m. + +Ack deadlines are extended periodically by the client. The initial ack +deadline given to messages is based on the subscription's AckDeadline property, +which defaults to 10s. The period between extensions, as well as the +length of the extension, automatically adjusts based on the time it takes the +subscriber application to ack messages (based on the 99th percentile of ack latency). +By default, this extension period is capped at 10m, but this limit can be configured +by the "MaxExtensionPeriod" setting. This has the effect that subscribers that process +messages quickly have their message ack deadlines extended for a short amount, whereas subscribers that process message slowly have their message ack deadlines extended for a large amount. The net effect is fewer RPCs sent from the client library. For example, consider a subscriber that takes 3 minutes to process each message. 
-Since the library has already recorded several 3 minute "time to ack"s in a +Since the library has already recorded several 3-minute "ack latencies"s in a percentile distribution, future message extensions are sent with a value of 3 minutes, every 3 minutes. Suppose the application crashes 5 seconds after the library sends such an extension: the Pub/Sub server would wait the remaining 2m55s before re-sending the messages out to other subscribers. -Please note that the client library does not use the subscription's AckDeadline -by default. To enforce the subscription AckDeadline, set MaxExtension to the -subscription's AckDeadline: +Please note that by default, the client library does not use the subscription's +AckDeadline for the MaxExtension value. To enforce the subscription's AckDeadline, +set MaxExtension to the subscription's AckDeadline: cfg, err := sub.Config(ctx) if err != nil { @@ -130,11 +149,29 @@ subscription's AckDeadline: sub.ReceiveSettings.MaxExtension = cfg.AckDeadline - -Slow Message Processing +# Slow Message Processing For use cases where message processing exceeds 30 minutes, we recommend using the base client in a pull model, since long-lived streams are periodically killed by firewalls. See the example at https://godoc.org/cloud.google.com/go/pubsub/apiv1#example-SubscriberClient-Pull-LengthyClientProcessing + +# Emulator + +To use an emulator with this library, you can set the PUBSUB_EMULATOR_HOST +environment variable to the address at which your emulator is running. This will +send requests to that address instead of to Cloud Pub/Sub. You can then create +and use a client as usual: + + // Set PUBSUB_EMULATOR_HOST environment variable. + err := os.Setenv("PUBSUB_EMULATOR_HOST", "localhost:9000") + if err != nil { + // TODO: Handle error. + } + // Create client as usual. + client, err := pubsub.NewClient(ctx, "my-project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() */ package pubsub // import "cloud.google.com/go/pubsub" diff --git a/vendor/cloud.google.com/go/pubsub/flow_controller.go b/vendor/cloud.google.com/go/pubsub/flow_controller.go index 3f165a0ac18ca..8b35250815561 100644 --- a/vendor/cloud.google.com/go/pubsub/flow_controller.go +++ b/vendor/cloud.google.com/go/pubsub/flow_controller.go @@ -16,12 +16,60 @@ package pubsub import ( "context" + "errors" "sync/atomic" "golang.org/x/sync/semaphore" ) -// flowController implements flow control for Subscription.Receive. +// LimitExceededBehavior configures the behavior that flowController can use in case +// the flow control limits are exceeded. +type LimitExceededBehavior int + +const ( + // FlowControlIgnore disables flow control. + FlowControlIgnore LimitExceededBehavior = iota + // FlowControlBlock signals to wait until the request can be made without exceeding the limit. + FlowControlBlock + // FlowControlSignalError signals an error to the caller of acquire. + FlowControlSignalError +) + +// flowControllerPurpose indicates whether a flowController is for a topic or a +// subscription. +type flowControllerPurpose int + +const ( + flowControllerPurposeSubscription flowControllerPurpose = iota + flowControllerPurposeTopic +) + +// FlowControlSettings controls flow control for messages while publishing or subscribing. +type FlowControlSettings struct { + // MaxOutstandingMessages is the maximum number of buffered messages to be published. + // If less than or equal to zero, this is disabled. 
+ MaxOutstandingMessages int + + // MaxOutstandingBytes is the maximum size of buffered messages to be published. + // If less than or equal to zero, this is disabled. + MaxOutstandingBytes int + + // LimitExceededBehavior configures the behavior when trying to publish + // additional messages while the flow controller is full. The available options + // are Ignore (disable, default), Block, and SignalError (publish + // results will return an error). + LimitExceededBehavior LimitExceededBehavior +} + +var ( + // ErrFlowControllerMaxOutstandingMessages indicates that outstanding messages exceeds MaxOutstandingMessages. + ErrFlowControllerMaxOutstandingMessages = errors.New("pubsub: MaxOutstandingMessages flow controller limit exceeded") + + // ErrFlowControllerMaxOutstandingBytes indicates that outstanding bytes of messages exceeds MaxOutstandingBytes. + ErrFlowControllerMaxOutstandingBytes = errors.New("pubsub: MaxOutstandingBytes flow control limit exceeded") +) + +// flowController implements flow control for publishing and subscribing. type flowController struct { maxCount int maxSize int // max total size of messages @@ -31,81 +79,111 @@ type flowController struct { // small releases. // Atomic. countRemaining int64 + // Number of outstanding bytes remaining. Atomic. + bytesRemaining int64 + limitBehavior LimitExceededBehavior + purpose flowControllerPurpose } // newFlowController creates a new flowController that ensures no more than // maxCount messages or maxSize bytes are outstanding at once. If maxCount or // maxSize is < 1, then an unlimited number of messages or bytes is permitted, // respectively. -func newFlowController(maxCount, maxSize int) *flowController { - fc := &flowController{ - maxCount: maxCount, - maxSize: maxSize, - semCount: nil, - semSize: nil, +func newFlowController(fc FlowControlSettings) flowController { + f := flowController{ + maxCount: fc.MaxOutstandingMessages, + maxSize: fc.MaxOutstandingBytes, + semCount: nil, + semSize: nil, + limitBehavior: fc.LimitExceededBehavior, } - if maxCount > 0 { - fc.semCount = semaphore.NewWeighted(int64(maxCount)) + if fc.MaxOutstandingMessages > 0 { + f.semCount = semaphore.NewWeighted(int64(fc.MaxOutstandingMessages)) } - if maxSize > 0 { - fc.semSize = semaphore.NewWeighted(int64(maxSize)) + if fc.MaxOutstandingBytes > 0 { + f.semSize = semaphore.NewWeighted(int64(fc.MaxOutstandingBytes)) } - return fc + return f +} + +func newTopicFlowController(fc FlowControlSettings) flowController { + f := newFlowController(fc) + f.purpose = flowControllerPurposeTopic + return f +} + +func newSubscriptionFlowController(fc FlowControlSettings) flowController { + f := newFlowController(fc) + f.purpose = flowControllerPurposeSubscription + return f } -// acquire blocks until one message of size bytes can proceed or ctx is done. -// It returns nil in the first case, or ctx.Err() in the second. +// acquire allocates space for a message: the message count and its size. // -// acquire allows large messages to proceed by treating a size greater than maxSize +// In FlowControlSignalError mode, large messages greater than maxSize +// will be result in an error. In other modes, large messages will be treated // as if it were equal to maxSize. 
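The exported FlowControlSettings above are surfaced on the high-level client through PublishSettings; a sketch of opting into blocking publish-side flow control, with illustrative limits (assumes a client created via pubsub.NewClient as in the doc.go examples earlier in this diff):

    t := client.Topic("topic-name")
    t.PublishSettings.FlowControlSettings = pubsub.FlowControlSettings{
        MaxOutstandingMessages: 1000,              // cap on buffered, unpublished messages
        MaxOutstandingBytes:    100 * 1024 * 1024, // cap on buffered, unpublished bytes
        LimitExceededBehavior:  pubsub.FlowControlBlock,
    }
    res := t.Publish(ctx, &pubsub.Message{Data: []byte("payload")})
    if _, err := res.Get(ctx); err != nil {
        // TODO: Handle error.
    }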
func (f *flowController) acquire(ctx context.Context, size int) error { - if f.semCount != nil { - if err := f.semCount.Acquire(ctx, 1); err != nil { - return err + switch f.limitBehavior { + case FlowControlIgnore: + return nil + case FlowControlBlock: + if f.semCount != nil { + if err := f.semCount.Acquire(ctx, 1); err != nil { + return err + } } - } - if f.semSize != nil { - if err := f.semSize.Acquire(ctx, f.bound(size)); err != nil { - if f.semCount != nil { - f.semCount.Release(1) + if f.semSize != nil { + if err := f.semSize.Acquire(ctx, f.bound(size)); err != nil { + if f.semCount != nil { + f.semCount.Release(1) + } + return err + } + } + case FlowControlSignalError: + if f.semCount != nil { + if !f.semCount.TryAcquire(1) { + return ErrFlowControllerMaxOutstandingMessages + } + } + if f.semSize != nil { + // Try to acquire the full size of the message here. + if !f.semSize.TryAcquire(int64(size)) { + if f.semCount != nil { + f.semCount.Release(1) + } + return ErrFlowControllerMaxOutstandingBytes } - return err } } - atomic.AddInt64(&f.countRemaining, 1) - return nil -} -// tryAcquire returns false if acquire would block. Otherwise, it behaves like -// acquire and returns true. -// -// tryAcquire allows large messages to proceed by treating a size greater than -// maxSize as if it were equal to maxSize. -func (f *flowController) tryAcquire(size int) bool { if f.semCount != nil { - if !f.semCount.TryAcquire(1) { - return false - } + outstandingMessages := atomic.AddInt64(&f.countRemaining, 1) + f.recordOutstandingMessages(ctx, outstandingMessages) } + if f.semSize != nil { - if !f.semSize.TryAcquire(f.bound(size)) { - if f.semCount != nil { - f.semCount.Release(1) - } - return false - } + outstandingBytes := atomic.AddInt64(&f.bytesRemaining, f.bound(size)) + f.recordOutstandingBytes(ctx, outstandingBytes) } - atomic.AddInt64(&f.countRemaining, 1) - return true + return nil } // release notes that one message of size bytes is no longer outstanding. -func (f *flowController) release(size int) { - atomic.AddInt64(&f.countRemaining, -1) +func (f *flowController) release(ctx context.Context, size int) { + if f.limitBehavior == FlowControlIgnore { + return + } + if f.semCount != nil { + outstandingMessages := atomic.AddInt64(&f.countRemaining, -1) + f.recordOutstandingMessages(ctx, outstandingMessages) f.semCount.Release(1) } if f.semSize != nil { + outstandingBytes := atomic.AddInt64(&f.bytesRemaining, -1*f.bound(size)) + f.recordOutstandingBytes(ctx, outstandingBytes) f.semSize.Release(f.bound(size)) } } @@ -117,6 +195,26 @@ func (f *flowController) bound(size int) int64 { return int64(size) } +// count returns the number of outstanding messages. +// if maxCount is 0, this will always return 0. 
func (f *flowController) count() int { return int(atomic.LoadInt64(&f.countRemaining)) } + +func (f *flowController) recordOutstandingMessages(ctx context.Context, n int64) { + if f.purpose == flowControllerPurposeTopic { + recordStat(ctx, PublisherOutstandingMessages, n) + return + } + + recordStat(ctx, OutstandingMessages, n) +} + +func (f *flowController) recordOutstandingBytes(ctx context.Context, n int64) { + if f.purpose == flowControllerPurposeTopic { + recordStat(ctx, PublisherOutstandingBytes, n) + return + } + + recordStat(ctx, OutstandingBytes, n) +} diff --git a/vendor/cloud.google.com/go/pubsub/internal/scheduler/publish_scheduler.go b/vendor/cloud.google.com/go/pubsub/internal/scheduler/publish_scheduler.go new file mode 100644 index 0000000000000..cba172b79db20 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/internal/scheduler/publish_scheduler.go @@ -0,0 +1,207 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scheduler + +import ( + "errors" + "reflect" + "sync" + "time" + + "google.golang.org/api/support/bundler" +) + +// PublishScheduler is a scheduler which is designed for Pub/Sub's Publish flow. +// It bundles items before handling them. All items in this PublishScheduler use +// the same handler. +// +// Each item is added with a given key. Items added to the empty string key are +// handled in random order. Items added to any other key are handled +// sequentially. +type PublishScheduler struct { + // Settings passed down to each bundler that gets created. + DelayThreshold time.Duration + BundleCountThreshold int + BundleByteThreshold int + BundleByteLimit int + BufferedByteLimit int + + mu sync.Mutex + bundlers sync.Map // keys -> *bundler.Bundler + outstanding sync.Map // keys -> num outstanding messages + + keysMu sync.RWMutex + // keysWithErrors tracks ordering keys that cannot accept new messages. + // A bundler might not accept new messages if publishing has failed + // for a specific ordering key, and can be resumed with topic.ResumePublish(). + keysWithErrors map[string]struct{} + + // workers is a channel that represents workers. Rather than a pool, where + // worker are "removed" until the pool is empty, the channel is more like a + // set of work desks, where workers are "added" until all the desks are full. + // + // workers does not restrict the amount of goroutines in the bundlers. + // Rather, it acts as the flow control for completion of bundler work. + workers chan struct{} + handle func(bundle interface{}) + done chan struct{} +} + +// NewPublishScheduler returns a new PublishScheduler. +// +// The workers arg is the number of workers that will operate on the queue of +// work. A reasonably large number of workers is highly recommended. If the +// workers arg is 0, then a healthy default of 10 workers is used. +// +// The scheduler does not use a parent context. If it did, canceling that +// context would immediately stop the scheduler without waiting for +// undelivered messages. 
+// +// The scheduler should be stopped only with FlushAndStop. +func NewPublishScheduler(workers int, handle func(bundle interface{})) *PublishScheduler { + if workers == 0 { + workers = 10 + } + + s := PublishScheduler{ + keysWithErrors: make(map[string]struct{}), + workers: make(chan struct{}, workers), + handle: handle, + done: make(chan struct{}), + } + + return &s +} + +// Add adds an item to the scheduler at a given key. +// +// Add never blocks. Buffering happens in the scheduler's publishers. There is +// no flow control. +// +// Since ordered keys require only a single outstanding RPC at once, it is +// possible to send ordered key messages to Topic.Publish (and subsequently to +// PublishScheduler.Add) faster than the bundler can publish them to the +// Pub/Sub service, resulting in a backed up queue of Pub/Sub bundles. Each +// item in the bundler queue is a goroutine. +func (s *PublishScheduler) Add(key string, item interface{}, size int) error { + select { + case <-s.done: + return errors.New("draining") + default: + } + + s.mu.Lock() + defer s.mu.Unlock() + var b *bundler.Bundler + bInterface, ok := s.bundlers.Load(key) + + if !ok { + s.outstanding.Store(key, 1) + b = bundler.NewBundler(item, func(bundle interface{}) { + s.workers <- struct{}{} + s.handle(bundle) + <-s.workers + + nlen := reflect.ValueOf(bundle).Len() + s.mu.Lock() + outsInterface, _ := s.outstanding.Load(key) + s.outstanding.Store(key, outsInterface.(int)-nlen) + if v, _ := s.outstanding.Load(key); v == 0 { + s.outstanding.Delete(key) + s.bundlers.Delete(key) + } + s.mu.Unlock() + }) + b.DelayThreshold = s.DelayThreshold + b.BundleCountThreshold = s.BundleCountThreshold + b.BundleByteThreshold = s.BundleByteThreshold + b.BundleByteLimit = s.BundleByteLimit + b.BufferedByteLimit = s.BufferedByteLimit + + if b.BufferedByteLimit == 0 { + b.BufferedByteLimit = 1e9 + } + + if key == "" { + // There's no way to express "unlimited" in the bundler, so we use + // some high number. + b.HandlerLimit = 1e9 + } else { + // HandlerLimit=1 causes the bundler to act as a sequential queue. + b.HandlerLimit = 1 + } + + s.bundlers.Store(key, b) + } else { + b = bInterface.(*bundler.Bundler) + oi, _ := s.outstanding.Load(key) + s.outstanding.Store(key, oi.(int)+1) + } + + return b.Add(item, size) +} + +// FlushAndStop begins flushing items from bundlers and from the scheduler. It +// blocks until all items have been flushed. +func (s *PublishScheduler) FlushAndStop() { + close(s.done) + s.bundlers.Range(func(_, bi interface{}) bool { + bi.(*bundler.Bundler).Flush() + return true + }) +} + +// Flush waits until all bundlers are sent. +func (s *PublishScheduler) Flush() { + var wg sync.WaitGroup + s.bundlers.Range(func(_, bi interface{}) bool { + wg.Add(1) + go func(b *bundler.Bundler) { + defer wg.Done() + b.Flush() + }(bi.(*bundler.Bundler)) + return true + }) + wg.Wait() + +} + +// IsPaused checks if the bundler associated with an ordering keys is +// paused. +func (s *PublishScheduler) IsPaused(orderingKey string) bool { + s.keysMu.RLock() + defer s.keysMu.RUnlock() + _, ok := s.keysWithErrors[orderingKey] + return ok +} + +// Pause pauses the bundler associated with the provided ordering key, +// preventing it from accepting new messages. Any outstanding messages +// that haven't been published will error. If orderingKey is empty, +// this is a no-op. 
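The keysWithErrors/Pause/Resume machinery above backs the public ordering-key behavior: once a publish for a key fails, further publishes on that key fail until topic.ResumePublish is called. A sketch of that user-facing flow (names illustrative, not part of the vendored code):

    t := client.Topic("topic-name")
    t.EnableMessageOrdering = true
    res := t.Publish(ctx, &pubsub.Message{
        Data:        []byte("payload"),
        OrderingKey: "user-123",
    })
    if _, err := res.Get(ctx); err != nil {
        // Publishing for "user-123" is now paused; unblock it explicitly
        // once the failure has been handled.
        t.ResumePublish("user-123")
    }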
+func (s *PublishScheduler) Pause(orderingKey string) { + if orderingKey != "" { + s.keysMu.Lock() + defer s.keysMu.Unlock() + s.keysWithErrors[orderingKey] = struct{}{} + } +} + +// Resume resumes accepting message with the provided ordering key. +func (s *PublishScheduler) Resume(orderingKey string) { + s.keysMu.Lock() + defer s.keysMu.Unlock() + delete(s.keysWithErrors, orderingKey) +} diff --git a/vendor/cloud.google.com/go/pubsub/internal/scheduler/receive_scheduler.go b/vendor/cloud.google.com/go/pubsub/internal/scheduler/receive_scheduler.go new file mode 100644 index 0000000000000..dafccdd1cee35 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/internal/scheduler/receive_scheduler.go @@ -0,0 +1,141 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scheduler + +import ( + "errors" + "sync" +) + +// ErrReceiveDraining indicates the scheduler has shutdown and is draining. +var ErrReceiveDraining error = errors.New("pubsub: receive scheduler draining") + +// ReceiveScheduler is a scheduler which is designed for Pub/Sub's Receive flow. +// +// Each item is added with a given key. Items added to the empty string key are +// handled in random order. Items added to any other key are handled +// sequentially. +type ReceiveScheduler struct { + // workers is a channel that represents workers. Rather than a pool, where + // worker are "removed" until the pool is empty, the channel is more like a + // set of work desks, where workers are "added" until all the desks are full. + // + // A worker taking an item from the unordered queue (key="") completes a + // single item and then goes back to the pool. + // + // A worker taking an item from an ordered queue (key="something") completes + // all work in that queue until the queue is empty, then deletes the queue, + // then goes back to the pool. + workers chan struct{} + done chan struct{} + + mu sync.Mutex + m map[string][]func() +} + +// NewReceiveScheduler creates a new ReceiveScheduler. +// +// The workers arg is the number of concurrent calls to handle. If the workers +// arg is 0, then a healthy default of 10 workers is used. If less than 0, this +// will be set to an large number, similar to PublishScheduler's handler limit. +func NewReceiveScheduler(workers int) *ReceiveScheduler { + if workers == 0 { + workers = 10 + } else if workers < 0 { + workers = 1e9 + } + + return &ReceiveScheduler{ + workers: make(chan struct{}, workers), + done: make(chan struct{}), + m: make(map[string][]func()), + } +} + +// Add adds the item to be handled. Add may block. +// +// Buffering happens above the ReceiveScheduler in the form of a flow controller +// that requests batches of messages to pull. A backed up ReceiveScheduler.Add +// call causes pushback to the pubsub service (less Receive calls on the +// long-lived stream), which keeps memory footprint stable. 
+func (s *ReceiveScheduler) Add(key string, item interface{}, handle func(item interface{})) error { + select { + case <-s.done: + return ErrReceiveDraining + default: + } + if key == "" { + // Spawn a worker. + s.workers <- struct{}{} + go func() { + // Unordered keys can be handled immediately. + handle(item) + <-s.workers + }() + return nil + } + + // Add it to the queue. This has to happen before we enter the goroutine + // below to prevent a race from the next iteration of the key-loop + // adding another item before this one gets queued. + + s.mu.Lock() + _, ok := s.m[key] + s.m[key] = append(s.m[key], func() { + handle(item) + }) + s.mu.Unlock() + if ok { + // Someone is already working on this key. + return nil + } + + // Spawn a worker. + s.workers <- struct{}{} + + go func() { + defer func() { <-s.workers }() + + // Key-Loop: loop through the available items in the key's queue. + for { + s.mu.Lock() + if len(s.m[key]) == 0 { + // We're done processing items - the queue is empty. Delete + // the queue from the map and free up the worker. + delete(s.m, key) + s.mu.Unlock() + return + } + // Pop an item from the queue. + next := s.m[key][0] + s.m[key] = s.m[key][1:] + s.mu.Unlock() + + next() // Handle next in queue. + } + }() + + return nil +} + +// Shutdown begins flushing messages and stops accepting new Add calls. Shutdown +// does not block, or wait for all messages to be flushed. +func (s *ReceiveScheduler) Shutdown() { + select { + case <-s.done: + default: + close(s.done) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/internal/version.go b/vendor/cloud.google.com/go/pubsub/internal/version.go new file mode 100644 index 0000000000000..c828210e2d61f --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. +const Version = "1.27.1" diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go index c42fb02d71b12..ced6a5c900899 100644 --- a/vendor/cloud.google.com/go/pubsub/iterator.go +++ b/vendor/cloud.google.com/go/pubsub/iterator.go @@ -16,18 +16,23 @@ package pubsub import ( "context" + "errors" "io" + "log" + "strings" "sync" "time" + ipubsub "cloud.google.com/go/internal/pubsub" vkit "cloud.google.com/go/pubsub/apiv1" "cloud.google.com/go/pubsub/internal/distribution" - "github.com/golang/protobuf/proto" gax "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/apierror" pb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protowire" ) // Between message receipt and ack (that is, the time spent processing a message) we want to extend the message @@ -36,21 +41,40 @@ import ( // of the actual deadline. 
const gracePeriod = 5 * time.Second +// ackIDBatchSize is the maximum number of ACK IDs to send in a single Ack/Modack RPC. +// The backend imposes a maximum request size limit of 524288 bytes (512 KiB) per +// acknowledge / modifyAckDeadline request. ACK IDs have a maximum size of 164 +// bytes, thus we cannot send more than 524288/176 ~= 2979 ACK IDs in an Ack/ModAc + +// Accounting for some overhead, we should thus only send a maximum of 2500 ACK +// IDs at a time. +// This is a var such that it can be modified for tests. +const ackIDBatchSize int = 2500 + +// These are vars so tests can change them. +var ( + maxDurationPerLeaseExtension = 10 * time.Minute + minDurationPerLeaseExtension = 10 * time.Second + minDurationPerLeaseExtensionExactlyOnce = 1 * time.Minute + + // The total amount of time to retry acks/modacks with exactly once delivery enabled subscriptions. + exactlyOnceDeliveryRetryDeadline = 600 * time.Second +) + type messageIterator struct { - ctx context.Context - cancel func() // the function that will cancel ctx; called in stop - po *pullOptions - ps *pullStream - subc *vkit.SubscriberClient - subName string - maxExtensionPeriod *time.Duration - kaTick <-chan time.Time // keep-alive (deadline extensions) - ackTicker *time.Ticker // message acks - nackTicker *time.Ticker // message nacks (more frequent than acks) - pingTicker *time.Ticker // sends to the stream to keep it open - failed chan struct{} // closed on stream error - drained chan struct{} // closed when stopped && no more pending messages - wg sync.WaitGroup + ctx context.Context + cancel func() // the function that will cancel ctx; called in stop + po *pullOptions + ps *pullStream + subc *vkit.SubscriberClient + subName string + kaTick <-chan time.Time // keep-alive (deadline extensions) + ackTicker *time.Ticker // message acks + nackTicker *time.Ticker // message nacks + pingTicker *time.Ticker // sends to the stream to keep it open + failed chan struct{} // closed on stream error + drained chan struct{} // closed when stopped && no more pending messages + wg sync.WaitGroup mu sync.Mutex ackTimeDist *distribution.D // dist uses seconds @@ -62,30 +86,43 @@ type messageIterator struct { // to update ack deadlines (via modack), we'll consult this table and only include IDs // that are not beyond their deadline. keepAliveDeadlines map[string]time.Time - pendingAcks map[string]bool - pendingNacks map[string]bool - pendingModAcks map[string]bool // ack IDs whose ack deadline is to be modified - err error // error from stream failure + pendingAcks map[string]*AckResult + pendingNacks map[string]*AckResult + // ack IDs whose ack deadline is to be modified + // ModAcks don't have AckResults but allows reuse of the SendModAck function. + pendingModAcks map[string]*AckResult + err error // error from stream failure + + eoMu sync.RWMutex + enableExactlyOnceDelivery bool + sendNewAckDeadline bool } // newMessageIterator starts and returns a new messageIterator. // subName is the full name of the subscription to pull messages from. // Stop must be called on the messageIterator when it is no longer needed. // The iterator always uses the background context for acking messages and extending message deadlines. 
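The pull options threaded into the iterator and stream below (max outstanding messages/bytes, extension periods, legacy flow control) originate from the subscription's ReceiveSettings; a sketch of the corresponding knobs, with illustrative values and assuming a client and the standard time package:

    sub := client.Subscription("sub-name")
    sub.ReceiveSettings.MaxOutstandingMessages = 500
    sub.ReceiveSettings.MaxOutstandingBytes = 50 * 1024 * 1024
    sub.ReceiveSettings.NumGoroutines = 2
    sub.ReceiveSettings.MaxExtension = 30 * time.Minute
    // Leave UseLegacyFlowControl false so the limits are also enforced
    // server-side on the streaming pull connection.
    sub.ReceiveSettings.UseLegacyFlowControl = false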
-func newMessageIterator(subc *vkit.SubscriberClient, subName string, maxExtensionPeriod *time.Duration, po *pullOptions) *messageIterator { +func newMessageIterator(subc *vkit.SubscriberClient, subName string, po *pullOptions) *messageIterator { var ps *pullStream if !po.synchronous { - ps = newPullStream(context.Background(), subc.StreamingPull, subName) + maxMessages := po.maxOutstandingMessages + maxBytes := po.maxOutstandingBytes + if po.useLegacyFlowControl { + maxMessages = 0 + maxBytes = 0 + } + ps = newPullStream(context.Background(), subc.StreamingPull, subName, maxMessages, maxBytes, po.maxExtensionPeriod) } // The period will update each tick based on the distribution of acks. We'll start by arbitrarily sending // the first keepAlive halfway towards the minimum ack deadline. - keepAlivePeriod := minAckDeadline / 2 + keepAlivePeriod := minDurationPerLeaseExtension / 2 // Ack promptly so users don't lose work if client crashes. ackTicker := time.NewTicker(100 * time.Millisecond) nackTicker := time.NewTicker(100 * time.Millisecond) pingTicker := time.NewTicker(30 * time.Second) cctx, cancel := context.WithCancel(context.Background()) + cctx = withSubscriptionKey(cctx, subName) it := &messageIterator{ ctx: cctx, cancel: cancel, @@ -93,18 +130,17 @@ func newMessageIterator(subc *vkit.SubscriberClient, subName string, maxExtensio po: po, subc: subc, subName: subName, - maxExtensionPeriod: maxExtensionPeriod, kaTick: time.After(keepAlivePeriod), ackTicker: ackTicker, nackTicker: nackTicker, pingTicker: pingTicker, failed: make(chan struct{}), drained: make(chan struct{}), - ackTimeDist: distribution.New(int(maxAckDeadline/time.Second) + 1), + ackTimeDist: distribution.New(int(maxDurationPerLeaseExtension/time.Second) + 1), keepAliveDeadlines: map[string]time.Time{}, - pendingAcks: map[string]bool{}, - pendingNacks: map[string]bool{}, - pendingModAcks: map[string]bool{}, + pendingAcks: map[string]*AckResult{}, + pendingNacks: map[string]*AckResult{}, + pendingModAcks: map[string]*AckResult{}, } it.wg.Add(1) go it.sender() @@ -142,16 +178,26 @@ func (it *messageIterator) checkDrained() { } } +// Given a receiveTime, add the elapsed time to the iterator's ack distribution. +// These values are bounded by the ModifyAckDeadline limits, which are +// min/maxDurationPerLeaseExtension. +func (it *messageIterator) addToDistribution(receiveTime time.Time) { + d := time.Since(receiveTime) + d = maxDuration(d, minDurationPerLeaseExtension) + d = minDuration(d, maxDurationPerLeaseExtension) + it.ackTimeDist.Record(int(d / time.Second)) +} + // Called when a message is acked/nacked. -func (it *messageIterator) done(ackID string, ack bool, receiveTime time.Time) { - it.ackTimeDist.Record(int(time.Since(receiveTime) / time.Second)) +func (it *messageIterator) done(ackID string, ack bool, r *AckResult, receiveTime time.Time) { + it.addToDistribution(receiveTime) it.mu.Lock() defer it.mu.Unlock() delete(it.keepAliveDeadlines, ackID) if ack { - it.pendingAcks[ackID] = true + it.pendingAcks[ackID] = r } else { - it.pendingNacks[ackID] = true + it.pendingNacks[ackID] = r } it.checkDrained() } @@ -200,34 +246,39 @@ func (it *messageIterator) receive(maxToPull int32) ([]*Message, error) { return nil, it.fail(err) } recordStat(it.ctx, PullCount, int64(len(rmsgs))) - msgs, err := convertMessages(rmsgs) + now := time.Now() + msgs, err := convertMessages(rmsgs, now, it.done) if err != nil { return nil, it.fail(err) } // We received some messages. Remember them so we can keep them alive. 
Also, // do a receipt mod-ack when streaming. maxExt := time.Now().Add(it.po.maxExtension) - ackIDs := map[string]bool{} + ackIDs := map[string]*AckResult{} it.mu.Lock() - now := time.Now() for _, m := range msgs { - m.receiveTime = now - addRecv(m.ID, m.ackID, now) - m.doneFunc = it.done - it.keepAliveDeadlines[m.ackID] = maxExt + ackID := msgAckID(m) + addRecv(m.ID, ackID, now) + it.keepAliveDeadlines[ackID] = maxExt // Don't change the mod-ack if the message is going to be nacked. This is // possible if there are retries. - if !it.pendingNacks[m.ackID] { - ackIDs[m.ackID] = true + if _, ok := it.pendingNacks[ackID]; !ok { + // Don't use the message's AckResult here since these are only for receipt modacks. + // ModAckResults are transparent to the user anyway so these can automatically succeed. + // We can't use an empty AckResult here either since SetAckResult will try to + // close the channel without checking if it exists. + ackIDs[ackID] = newSuccessAckResult() } } deadline := it.ackDeadline() it.mu.Unlock() - if len(ackIDs) > 0 { - if !it.sendModAck(ackIDs, deadline) { - return nil, it.err + go func() { + if len(ackIDs) > 0 { + // Don't check the return value of this since modacks are fire and forget, + // meaning errors should not be propagated to the client. + it.sendModAck(ackIDs, deadline) } - } + }() return msgs, nil } @@ -243,6 +294,8 @@ func (it *messageIterator) pullMessages(maxToPull int32) ([]*pb.ReceivedMessage, switch { case err == context.Canceled: return nil, nil + case status.Code(err) == codes.Canceled: + return nil, nil case err != nil: return nil, err default: @@ -255,6 +308,12 @@ func (it *messageIterator) recvMessages() ([]*pb.ReceivedMessage, error) { if err != nil { return nil, err } + it.eoMu.Lock() + if got := res.GetSubscriptionProperties().GetExactlyOnceDeliveryEnabled(); got != it.enableExactlyOnceDelivery { + it.sendNewAckDeadline = true + it.enableExactlyOnceDelivery = got + } + it.eoMu.Unlock() return res.ReceivedMessages, nil } @@ -320,36 +379,30 @@ func (it *messageIterator) sender() { sendPing = !it.po.synchronous } // Lock is held here. - var acks, nacks, modAcks map[string]bool + var acks, nacks, modAcks map[string]*AckResult if sendAcks { acks = it.pendingAcks - it.pendingAcks = map[string]bool{} + it.pendingAcks = map[string]*AckResult{} } if sendNacks { nacks = it.pendingNacks - it.pendingNacks = map[string]bool{} + it.pendingNacks = map[string]*AckResult{} } if sendModAcks { modAcks = it.pendingModAcks - it.pendingModAcks = map[string]bool{} + it.pendingModAcks = map[string]*AckResult{} } it.mu.Unlock() // Make Ack and ModAck RPCs. if sendAcks { - if !it.sendAck(acks) { - return - } + it.sendAck(acks) } if sendNacks { // Nack indicated by modifying the deadline to zero. - if !it.sendModAck(nacks, 0) { - return - } + it.sendModAck(nacks, 0) } if sendModAcks { - if !it.sendModAck(modAcks, dl) { - return - } + it.sendModAck(modAcks, dl) } if sendPing { it.pingStream() @@ -371,96 +424,192 @@ func (it *messageIterator) handleKeepAlives() { // https://groups.google.com/forum/#!msg/golang-nuts/UciASUb03Js/pzSq5iVFAQAJ. delete(it.keepAliveDeadlines, id) } else { - // This will not conflict with a nack, because nacking removes the ID from keepAliveDeadlines. - it.pendingModAcks[id] = true + // Use a success AckResult since we don't propagate ModAcks back to the user. + it.pendingModAcks[id] = newSuccessAckResult() } } it.checkDrained() } -func (it *messageIterator) sendAck(m map[string]bool) bool { - // Account for the Subscription field. 
- overhead := calcFieldSizeString(it.subName) - return it.sendAckIDRPC(m, maxPayload-overhead, func(ids []string) error { - recordStat(it.ctx, AckCount, int64(len(ids))) - addAcks(ids) +// sendAck is used to confirm acknowledgement of a message. If exactly once delivery is +// enabled, we'll retry these messages for a short duration in a goroutine. +func (it *messageIterator) sendAck(m map[string]*AckResult) { + ackIDs := make([]string, 0, len(m)) + for k := range m { + ackIDs = append(ackIDs, k) + } + it.eoMu.RLock() + exactlyOnceDelivery := it.enableExactlyOnceDelivery + it.eoMu.RUnlock() + + var toSend []string + for len(ackIDs) > 0 { + toSend, ackIDs = splitRequestIDs(ackIDs, ackIDBatchSize) + + recordStat(it.ctx, AckCount, int64(len(toSend))) + addAcks(toSend) // Use context.Background() as the call's context, not it.ctx. We don't // want to cancel this RPC when the iterator is stopped. - return it.subc.Acknowledge(context.Background(), &pb.AcknowledgeRequest{ + cctx2, cancel2 := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel2() + err := it.subc.Acknowledge(cctx2, &pb.AcknowledgeRequest{ Subscription: it.subName, - AckIds: ids, + AckIds: toSend, }) - }) + if exactlyOnceDelivery { + resultsByAckID := make(map[string]*AckResult) + for _, ackID := range toSend { + resultsByAckID[ackID] = m[ackID] + } + st, md := extractMetadata(err) + _, toRetry := processResults(st, resultsByAckID, md) + if len(toRetry) > 0 { + // Retry acks in a separate goroutine. + go func() { + it.retryAcks(toRetry) + }() + } + } + } } +// sendModAck is used to extend the lease of messages or nack them. // The receipt mod-ack amount is derived from a percentile distribution based // on the time it takes to process messages. The percentile chosen is the 99%th // percentile in order to capture the highest amount of time necessary without -// considering 1% outliers. -func (it *messageIterator) sendModAck(m map[string]bool, deadline time.Duration) bool { +// considering 1% outliers. If the ModAck RPC fails and exactly once delivery is +// enabled, we retry it in a separate goroutine for a short duration. +func (it *messageIterator) sendModAck(m map[string]*AckResult, deadline time.Duration) { deadlineSec := int32(deadline / time.Second) - // Account for the Subscription and AckDeadlineSeconds fields. - overhead := calcFieldSizeString(it.subName) + calcFieldSizeInt(int(deadlineSec)) - return it.sendAckIDRPC(m, maxPayload-overhead, func(ids []string) error { + ackIDs := make([]string, 0, len(m)) + for k := range m { + ackIDs = append(ackIDs, k) + } + it.eoMu.RLock() + exactlyOnceDelivery := it.enableExactlyOnceDelivery + it.eoMu.RUnlock() + var toSend []string + for len(ackIDs) > 0 { + toSend, ackIDs = splitRequestIDs(ackIDs, ackIDBatchSize) if deadline == 0 { - recordStat(it.ctx, NackCount, int64(len(ids))) + recordStat(it.ctx, NackCount, int64(len(toSend))) } else { - recordStat(it.ctx, ModAckCount, int64(len(ids))) + recordStat(it.ctx, ModAckCount, int64(len(toSend))) } - addModAcks(ids, deadlineSec) - // Retry this RPC on Unavailable for a short amount of time, then give up - // without returning a fatal error. The utility of this RPC is by nature - // transient (since the deadline is relative to the current time) and it - // isn't crucial for correctness (since expired messages will just be - // resent). 
- cctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - bo := gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: time.Second, - Multiplier: 2, - } - for { - err := it.subc.ModifyAckDeadline(cctx, &pb.ModifyAckDeadlineRequest{ - Subscription: it.subName, - AckDeadlineSeconds: deadlineSec, - AckIds: ids, - }) - switch status.Code(err) { - case codes.Unavailable: - if err := gax.Sleep(cctx, bo.Pause()); err == nil { - continue - } - // Treat sleep timeout like RPC timeout. - fallthrough - case codes.DeadlineExceeded: - // Timeout. Not a fatal error, but note that it happened. - recordStat(it.ctx, ModAckTimeoutCount, 1) - return nil - default: - // Any other error is fatal. - return err + addModAcks(toSend, deadlineSec) + // Use context.Background() as the call's context, not it.ctx. We don't + // want to cancel this RPC when the iterator is stopped. + cctx, cancel2 := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel2() + err := it.subc.ModifyAckDeadline(cctx, &pb.ModifyAckDeadlineRequest{ + Subscription: it.subName, + AckDeadlineSeconds: deadlineSec, + AckIds: toSend, + }) + if exactlyOnceDelivery { + resultsByAckID := make(map[string]*AckResult) + for _, ackID := range toSend { + resultsByAckID[ackID] = m[ackID] + } + + st, md := extractMetadata(err) + _, toRetry := processResults(st, resultsByAckID, md) + if len(toRetry) > 0 { + // Retry modacks/nacks in a separate goroutine. + go func() { + it.retryModAcks(toRetry, deadlineSec) + }() } } - }) + } } -func (it *messageIterator) sendAckIDRPC(ackIDSet map[string]bool, maxSize int, call func([]string) error) bool { - ackIDs := make([]string, 0, len(ackIDSet)) - for k := range ackIDSet { - ackIDs = append(ackIDs, k) +// retryAcks retries the ack RPC with backoff. This must be called in a goroutine +// in it.sendAck(), with a max of 2500 ackIDs. +func (it *messageIterator) retryAcks(m map[string]*AckResult) { + ctx, cancel := context.WithTimeout(context.Background(), exactlyOnceDeliveryRetryDeadline) + defer cancel() + bo := newExactlyOnceBackoff() + for { + if ctx.Err() != nil { + for _, r := range m { + ipubsub.SetAckResult(r, AcknowledgeStatusOther, ctx.Err()) + } + return + } + // Don't need to split map since this is the retry function and + // there is already a max of 2500 ackIDs here. + ackIDs := make([]string, 0, len(m)) + for k := range m { + ackIDs = append(ackIDs, k) + } + cctx2, cancel2 := context.WithTimeout(ctx, 60*time.Second) + defer cancel2() + err := it.subc.Acknowledge(cctx2, &pb.AcknowledgeRequest{ + Subscription: it.subName, + AckIds: ackIDs, + }) + st, md := extractMetadata(err) + _, toRetry := processResults(st, m, md) + if len(toRetry) == 0 { + return + } + time.Sleep(bo.Pause()) + m = toRetry } - var toSend []string - for len(ackIDs) > 0 { - toSend, ackIDs = splitRequestIDs(ackIDs, maxSize) - if err := call(toSend); err != nil { - // The underlying client handles retries, so any error is fatal to the - // iterator. - it.fail(err) - return false +} + +// retryModAcks retries the modack RPC with backoff. This must be called in a goroutine +// in it.sendModAck(), with a max of 2500 ackIDs. Modacks are retried up to 3 times +// since after that, the message will have expired. Nacks are retried up until the default +// deadline of 10 minutes. 
+func (it *messageIterator) retryModAcks(m map[string]*AckResult, deadlineSec int32) { + bo := newExactlyOnceBackoff() + retryCount := 0 + ctx, cancel := context.WithTimeout(context.Background(), exactlyOnceDeliveryRetryDeadline) + defer cancel() + for { + // If context is done, complete all remaining Nacks with DeadlineExceeded + // ModAcks are not exposed to the user so these don't need to be modified. + if ctx.Err() != nil { + if deadlineSec == 0 { + for _, r := range m { + ipubsub.SetAckResult(r, AcknowledgeStatusOther, ctx.Err()) + } + } + return + } + // Only retry modack requests up to 3 times. + if deadlineSec != 0 && retryCount > 3 { + ackIDs := make([]string, 0, len(m)) + for k := range m { + ackIDs = append(ackIDs, k) + } + log.Printf("automatic lease modack retry failed for following IDs: %v", ackIDs) + return + } + // Don't need to split map since this is the retry function and + // there is already a max of 2500 ackIDs here. + ackIDs := make([]string, 0, len(m)) + for k := range m { + ackIDs = append(ackIDs, k) } + cctx2, cancel2 := context.WithTimeout(ctx, 60*time.Second) + defer cancel2() + err := it.subc.ModifyAckDeadline(cctx2, &pb.ModifyAckDeadlineRequest{ + Subscription: it.subName, + AckIds: ackIDs, + AckDeadlineSeconds: deadlineSec, + }) + st, md := extractMetadata(err) + _, toRetry := processResults(st, m, md) + if len(toRetry) == 0 { + return + } + time.Sleep(bo.Pause()) + m = toRetry + retryCount++ } - return true } // Send a message to the stream to keep it open. The stream will close if there's no @@ -470,8 +619,14 @@ func (it *messageIterator) sendAckIDRPC(ackIDSet map[string]bool, maxSize int, c // default ack deadline, and if the messages are small enough so that many can fit // into the buffer. func (it *messageIterator) pingStream() { - // Ignore error; if the stream is broken, this doesn't matter anyway. - _ = it.ps.Send(&pb.StreamingPullRequest{}) + spr := &pb.StreamingPullRequest{} + it.eoMu.RLock() + if it.sendNewAckDeadline { + spr.StreamAckDeadlineSeconds = int32(it.ackDeadline()) + it.sendNewAckDeadline = false + } + it.eoMu.RUnlock() + it.ps.Send(spr) } // calcFieldSizeString returns the number of bytes string fields @@ -479,7 +634,7 @@ func (it *messageIterator) pingStream() { func calcFieldSizeString(fields ...string) int { overhead := 0 for _, field := range fields { - overhead += 1 + len(field) + proto.SizeVarint(uint64(len(field))) + overhead += 1 + len(field) + protowire.SizeVarint(uint64(len(field))) } return overhead } @@ -489,26 +644,18 @@ func calcFieldSizeString(fields ...string) int { func calcFieldSizeInt(fields ...int) int { overhead := 0 for _, field := range fields { - overhead += 1 + proto.SizeVarint(uint64(field)) + overhead += 1 + protowire.SizeVarint(uint64(field)) } return overhead } // splitRequestIDs takes a slice of ackIDs and returns two slices such that the first -// ackID slice can be used in a request where the payload does not exceed maxSize. -func splitRequestIDs(ids []string, maxSize int) (prefix, remainder []string) { - size := 0 - i := 0 - // TODO(hongalex): Use binary search to find split index, since ackIDs are - // fairly constant. - for size < maxSize && i < len(ids) { - size += calcFieldSizeString(ids[i]) - i++ - } - if size > maxSize { - i-- - } - return ids[:i], ids[i:] +// ackID slice can be used in a request where the payload does not exceed ackIDBatchSize. 
+func splitRequestIDs(ids []string, maxBatchSize int) (prefix, remainder []string) { + if len(ids) < maxBatchSize { + return ids, []string{} + } + return ids[:maxBatchSize], ids[maxBatchSize:] } // The deadline to ack is derived from a percentile distribution based @@ -519,15 +666,107 @@ func splitRequestIDs(ids []string, maxSize int) (prefix, remainder []string) { // expiration. func (it *messageIterator) ackDeadline() time.Duration { pt := time.Duration(it.ackTimeDist.Percentile(.99)) * time.Second + it.eoMu.RLock() + enableExactlyOnce := it.enableExactlyOnceDelivery + it.eoMu.RUnlock() + return boundedDuration(pt, it.po.minExtensionPeriod, it.po.maxExtensionPeriod, enableExactlyOnce) +} - if *it.maxExtensionPeriod > 0 && pt > *it.maxExtensionPeriod { - return *it.maxExtensionPeriod +func boundedDuration(ackDeadline, minExtension, maxExtension time.Duration, exactlyOnce bool) time.Duration { + // If the user explicitly sets a maxExtensionPeriod, respect it. + if maxExtension > 0 { + ackDeadline = minDuration(ackDeadline, maxExtension) } - if pt > maxAckDeadline { - return maxAckDeadline + + // If the user explicitly sets a minExtensionPeriod, respect it. + if minExtension > 0 { + ackDeadline = maxDuration(ackDeadline, minExtension) + } else if exactlyOnce { + // Higher minimum ack_deadline for subscriptions with + // exactly-once delivery enabled. + ackDeadline = maxDuration(ackDeadline, minDurationPerLeaseExtensionExactlyOnce) + } else if ackDeadline < minDurationPerLeaseExtension { + // Otherwise, lower bound is min ack extension. This is normally bounded + // when adding datapoints to the distribution, but this is needed for + // the initial few calls to ackDeadline. + ackDeadline = minDurationPerLeaseExtension + } + + return ackDeadline +} + +func minDuration(x, y time.Duration) time.Duration { + if x < y { + return x } - if pt < minAckDeadline { - return minAckDeadline + return y +} + +func maxDuration(x, y time.Duration) time.Duration { + if x > y { + return x + } + return y +} + +const ( + transientErrStringPrefix = "TRANSIENT_" + permanentInvalidAckErrString = "PERMANENT_FAILURE_INVALID_ACK_ID" +) + +// extracts information from an API error for exactly once delivery's ack/modack err responses. +func extractMetadata(err error) (*status.Status, map[string]string) { + apiErr, ok := apierror.FromError(err) + if ok { + return apiErr.GRPCStatus(), apiErr.Metadata() + } + return nil, nil +} + +// processResults processes AckResults by referring to errorStatus and errorsMap. +// The errors returned by the server in `errorStatus` or in `errorsByAckID` +// are used to complete the AckResults in `ackResMap` (with a success +// or error) or to return requests for further retries. +// This function returns two maps of ackID to ack results, one for completed results and the other for ones to retry. +// Logic is derived from python-pubsub: https://github.com/googleapis/python-pubsub/blob/main/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py#L161-L220 +func processResults(errorStatus *status.Status, ackResMap map[string]*AckResult, errorsByAckID map[string]string) (map[string]*AckResult, map[string]*AckResult) { + completedResults := make(map[string]*AckResult) + retryResults := make(map[string]*AckResult) + for ackID, ar := range ackResMap { + // Handle special errors returned for ack/modack RPCs via the ErrorInfo + // sidecar metadata when exactly-once delivery is enabled. 
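+		// For example (illustrative values), errorsByAckID may map an ackID to
+		// "PERMANENT_FAILURE_INVALID_ACK_ID", which completes that result with
+		// AcknowledgeStatusInvalidAckID, while any value carrying the "TRANSIENT_"
+		// prefix leaves the ackID in the returned retry map.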
+ if errAckID, ok := errorsByAckID[ackID]; ok { + if strings.HasPrefix(errAckID, transientErrStringPrefix) { + retryResults[ackID] = ar + } else { + if errAckID == permanentInvalidAckErrString { + ipubsub.SetAckResult(ar, AcknowledgeStatusInvalidAckID, errors.New(errAckID)) + } else { + ipubsub.SetAckResult(ar, AcknowledgeStatusOther, errors.New(errAckID)) + } + completedResults[ackID] = ar + } + } else if errorStatus != nil && contains(errorStatus.Code(), exactlyOnceDeliveryTemporaryRetryErrors) { + retryResults[ackID] = ar + } else if errorStatus != nil { + // Other gRPC errors are not retried. + switch errorStatus.Code() { + case codes.PermissionDenied: + ipubsub.SetAckResult(ar, AcknowledgeStatusPermissionDenied, errorStatus.Err()) + case codes.FailedPrecondition: + ipubsub.SetAckResult(ar, AcknowledgeStatusFailedPrecondition, errorStatus.Err()) + default: + ipubsub.SetAckResult(ar, AcknowledgeStatusOther, errorStatus.Err()) + } + completedResults[ackID] = ar + } else if ar != nil { + // Since no error occurred, requests with AckResults are completed successfully. + ipubsub.SetAckResult(ar, AcknowledgeStatusSuccess, nil) + completedResults[ackID] = ar + } else { + // All other requests are considered completed. + completedResults[ackID] = ar + } } - return pt + return completedResults, retryResults } diff --git a/vendor/cloud.google.com/go/pubsub/message.go b/vendor/cloud.google.com/go/pubsub/message.go index 051c1df188832..1bebb9fc526be 100644 --- a/vendor/cloud.google.com/go/pubsub/message.go +++ b/vendor/cloud.google.com/go/pubsub/message.go @@ -15,106 +15,178 @@ package pubsub import ( + "fmt" "time" - "github.com/golang/protobuf/ptypes" + ipubsub "cloud.google.com/go/internal/pubsub" pb "google.golang.org/genproto/googleapis/pubsub/v1" ) // Message represents a Pub/Sub message. -type Message struct { - // ID identifies this message. - // This ID is assigned by the server and is populated for Messages obtained from a subscription. - // This field is read-only. - ID string +// +// Message can be passed to Topic.Publish for publishing. +// +// If received in the callback passed to Subscription.Receive, client code must +// call Message.Ack or Message.Nack when finished processing the Message. Calls +// to Ack or Nack have no effect after the first call. +// +// Ack indicates successful processing of a Message. If message acknowledgement +// fails, the Message will be redelivered. Nack indicates that the client will +// not or cannot process a Message. Nack will result in the Message being +// redelivered more quickly than if it were allowed to expire. +// +// If using exactly once delivery, you should call Message.AckWithResult and +// Message.NackWithResult instead. These methods will return an AckResult, +// which tracks the state of acknowledgement operation. If the AckResult returns +// successful, the message is guaranteed NOT to be re-delivered. Otherwise, +// the AckResult will return an error with more details about the failure +// and the message may be re-delivered. +type Message = ipubsub.Message + +// msgAckHandler performs a safe cast of the message's ack handler to psAckHandler. +func msgAckHandler(m *Message, eod bool) (*psAckHandler, bool) { + ackh, ok := ipubsub.MessageAckHandler(m).(*psAckHandler) + ackh.exactlyOnceDelivery = eod + return ackh, ok +} + +func msgAckID(m *Message) string { + if ackh, ok := msgAckHandler(m, false); ok { + return ackh.ackID + } + return "" +} + +// The done method of the iterator that created a Message. 
+type iterDoneFunc func(string, bool, *AckResult, time.Time) + +func convertMessages(rms []*pb.ReceivedMessage, receiveTime time.Time, doneFunc iterDoneFunc) ([]*Message, error) { + msgs := make([]*Message, 0, len(rms)) + for i, m := range rms { + msg, err := toMessage(m, receiveTime, doneFunc) + if err != nil { + return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m) + } + msgs = append(msgs, msg) + } + return msgs, nil +} - // Data is the actual data in the message. - Data []byte +func toMessage(resp *pb.ReceivedMessage, receiveTime time.Time, doneFunc iterDoneFunc) (*Message, error) { + ackh := &psAckHandler{ackID: resp.AckId} + msg := ipubsub.NewMessage(ackh) + if resp.Message == nil { + return msg, nil + } - // Attributes represents the key-value pairs the current message - // is labelled with. - Attributes map[string]string + pubTime := resp.Message.PublishTime.AsTime() + var deliveryAttempt *int + if resp.DeliveryAttempt > 0 { + da := int(resp.DeliveryAttempt) + deliveryAttempt = &da + } + + msg.Data = resp.Message.Data + msg.Attributes = resp.Message.Attributes + msg.ID = resp.Message.MessageId + msg.PublishTime = pubTime + msg.DeliveryAttempt = deliveryAttempt + msg.OrderingKey = resp.Message.OrderingKey + ackh.receiveTime = receiveTime + ackh.doneFunc = doneFunc + ackh.ackResult = ipubsub.NewAckResult() + return msg, nil +} + +// AckResult holds the result from a call to Ack or Nack. +// +// Call Get to obtain the result of the Ack/NackWithResult call. Example: +// +// // Get blocks until Ack/NackWithResult completes or ctx is done. +// ackStatus, err := r.Get(ctx) +// if err != nil { +// // TODO: Handle error. +// } +type AckResult = ipubsub.AckResult + +// AcknowledgeStatus represents the status of an Ack or Nack request. +type AcknowledgeStatus = ipubsub.AcknowledgeStatus + +const ( + // AcknowledgeStatusSuccess indicates the request was a success. + AcknowledgeStatusSuccess AcknowledgeStatus = iota + // AcknowledgeStatusPermissionDenied indicates the caller does not have sufficient permissions. + AcknowledgeStatusPermissionDenied + // AcknowledgeStatusFailedPrecondition indicates the request encountered a FailedPrecondition error. + AcknowledgeStatusFailedPrecondition + // AcknowledgeStatusInvalidAckID indicates one or more of the ack IDs sent were invalid. + AcknowledgeStatusInvalidAckID + // AcknowledgeStatusOther indicates another unknown error was returned. + AcknowledgeStatusOther +) + +// psAckHandler handles ack/nack for the pubsub package. +type psAckHandler struct { // ackID is the identifier to acknowledge this message. ackID string - // The time at which the message was published. - // This is populated by the server for Messages obtained from a subscription. - // This field is read-only. - PublishTime time.Time - // receiveTime is the time the message was received by the client. receiveTime time.Time - // DeliveryAttempt is the number of times a message has been delivered. - // This is part of the dead lettering feature that forwards messages that - // fail to be processed (from nack/ack deadline timeout) to a dead letter topic. - // If dead lettering is enabled, this will be set on all attempts, starting - // with value 1. Otherwise, the value will be nil. - // This field is read-only. - // - // It is EXPERIMENTAL and a part of a closed alpha that may not be - // accessible to all users. This field is subject to change or removal - // without notice. 
- DeliveryAttempt *int - - // size is the approximate size of the message's data and attributes. - size int - calledDone bool // The done method of the iterator that created this Message. - doneFunc func(string, bool, time.Time) -} + doneFunc iterDoneFunc -func toMessage(resp *pb.ReceivedMessage) (*Message, error) { - if resp.Message == nil { - return &Message{ackID: resp.AckId}, nil - } + // the ack result that will be returned for this ack handler + // if AckWithResult or NackWithResult is called. + ackResult *AckResult - pubTime, err := ptypes.Timestamp(resp.Message.PublishTime) - if err != nil { - return nil, err - } + // exactlyOnceDelivery determines if the message needs to be delivered + // exactly once. + exactlyOnceDelivery bool +} - var deliveryAttempt *int - if resp.DeliveryAttempt > 0 { - da := int(resp.DeliveryAttempt) - deliveryAttempt = &da - } +func (ah *psAckHandler) OnAck() { + ah.done(true) +} - return &Message{ - ackID: resp.AckId, - Data: resp.Message.Data, - Attributes: resp.Message.Attributes, - ID: resp.Message.MessageId, - PublishTime: pubTime, - DeliveryAttempt: deliveryAttempt, - }, nil +func (ah *psAckHandler) OnNack() { + ah.done(false) } -// Ack indicates successful processing of a Message passed to the Subscriber.Receive callback. -// It should not be called on any other Message value. -// If message acknowledgement fails, the Message will be redelivered. -// Client code must call Ack or Nack when finished for each received Message. -// Calls to Ack or Nack have no effect after the first call. -func (m *Message) Ack() { - m.done(true) +func (ah *psAckHandler) OnAckWithResult() *AckResult { + if !ah.exactlyOnceDelivery { + return newSuccessAckResult() + } + // call done with true to indicate ack. + ah.done(true) + return ah.ackResult } -// Nack indicates that the client will not or cannot process a Message passed to the Subscriber.Receive callback. -// It should not be called on any other Message value. -// Nack will result in the Message being redelivered more quickly than if it were allowed to expire. -// Client code must call Ack or Nack when finished for each received Message. -// Calls to Ack or Nack have no effect after the first call. -func (m *Message) Nack() { - m.done(false) +func (ah *psAckHandler) OnNackWithResult() *AckResult { + if !ah.exactlyOnceDelivery { + return newSuccessAckResult() + } + // call done with false to indicate nack. + ah.done(false) + return ah.ackResult } -func (m *Message) done(ack bool) { - if m.calledDone { +func (ah *psAckHandler) done(ack bool) { + if ah.calledDone { return } - m.calledDone = true - m.doneFunc(m.ackID, ack, m.receiveTime) + ah.calledDone = true + if ah.doneFunc != nil { + ah.doneFunc(ah.ackID, ack, ah.ackResult, ah.receiveTime) + } +} + +// newSuccessAckResult returns an AckResult that resolves to success immediately. +func newSuccessAckResult() *AckResult { + ar := ipubsub.NewAckResult() + ipubsub.SetAckResult(ar, AcknowledgeStatusSuccess, nil) + return ar } diff --git a/vendor/cloud.google.com/go/pubsub/nodebug.go b/vendor/cloud.google.com/go/pubsub/nodebug.go index 774a74a58dfd5..92760220afc10 100644 --- a/vendor/cloud.google.com/go/pubsub/nodebug.go +++ b/vendor/cloud.google.com/go/pubsub/nodebug.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !psdebug // +build !psdebug package pubsub diff --git a/vendor/cloud.google.com/go/pubsub/pstest/fake.go b/vendor/cloud.google.com/go/pubsub/pstest/fake.go index be53bcf67eea8..cae0f2db1b912 100644 --- a/vendor/cloud.google.com/go/pubsub/pstest/fake.go +++ b/vendor/cloud.google.com/go/pubsub/pstest/fake.go @@ -34,14 +34,37 @@ import ( "time" "cloud.google.com/go/internal/testutil" - "github.com/golang/protobuf/ptypes" - durpb "github.com/golang/protobuf/ptypes/duration" - emptypb "github.com/golang/protobuf/ptypes/empty" pb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + durpb "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" ) +// ReactorOptions is a map that Server uses to look up reactors. +// Key is the function name, value is array of reactor for the function. +type ReactorOptions map[string][]Reactor + +// Reactor is an interface to allow reaction function to a certain call. +type Reactor interface { + // React handles the message types and returns results. If "handled" is false, + // then the test server will ignore the results and continue to the next reactor + // or the original handler. + React(_ interface{}) (handled bool, ret interface{}, err error) +} + +// ServerReactorOption is options passed to the server for reactor creation. +type ServerReactorOption struct { + FuncName string + Reactor Reactor +} + +type publishResponse struct { + resp *pb.PublishResponse + err error +} + // For testing. Note that even though changes to the now variable are atomic, a call // to the stored function can race with a change to that function. This could be a // problem if tests are run in parallel, or even if concurrent parts of the same test @@ -70,43 +93,83 @@ type GServer struct { pb.PublisherServer pb.SubscriberServer - mu sync.Mutex - topics map[string]*topic - subs map[string]*subscription - msgs []*Message // all messages ever published - msgsByID map[string]*Message - wg sync.WaitGroup - nextID int - streamTimeout time.Duration + mu sync.Mutex + topics map[string]*topic + subs map[string]*subscription + msgs []*Message // all messages ever published + msgsByID map[string]*Message + wg sync.WaitGroup + nextID int + streamTimeout time.Duration + timeNowFunc func() time.Time + reactorOptions ReactorOptions + schemas map[string]*pb.Schema + + // PublishResponses is a channel of responses to use for Publish. + publishResponses chan *publishResponse + // autoPublishResponse enables the server to automatically generate + // PublishResponse when publish is called. Otherwise, responses + // are generated from the publishResponses channel. + autoPublishResponse bool } // NewServer creates a new fake server running in the current process. -func NewServer() *Server { - srv, err := testutil.NewServer() +func NewServer(opts ...ServerReactorOption) *Server { + return NewServerWithPort(0, opts...) +} + +// NewServerWithPort creates a new fake server running in the current process at the specified port. 
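+// A minimal usage sketch (illustrative; the port and project ID are
+// placeholders, and grpc/option refer to google.golang.org/grpc and
+// google.golang.org/api/option):
+//
+//	srv := pstest.NewServerWithPort(9000)
+//	defer srv.Close()
+//	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
+//	if err != nil {
+//		// handle error
+//	}
+//	client, err := pubsub.NewClient(ctx, "my-project", option.WithGRPCConn(conn))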
+func NewServerWithPort(port int, opts ...ServerReactorOption) *Server { + srv, err := testutil.NewServerWithPort(port) if err != nil { - panic(fmt.Sprintf("pstest.NewServer: %v", err)) + panic(fmt.Sprintf("pstest.NewServerWithPort: %v", err)) + } + reactorOptions := ReactorOptions{} + for _, opt := range opts { + reactorOptions[opt.FuncName] = append(reactorOptions[opt.FuncName], opt.Reactor) } s := &Server{ srv: srv, Addr: srv.Addr, GServer: GServer{ - topics: map[string]*topic{}, - subs: map[string]*subscription{}, - msgsByID: map[string]*Message{}, + topics: map[string]*topic{}, + subs: map[string]*subscription{}, + msgsByID: map[string]*Message{}, + timeNowFunc: timeNow, + reactorOptions: reactorOptions, + publishResponses: make(chan *publishResponse, 100), + autoPublishResponse: true, + schemas: map[string]*pb.Schema{}, }, } pb.RegisterPublisherServer(srv.Gsrv, &s.GServer) pb.RegisterSubscriberServer(srv.Gsrv, &s.GServer) + pb.RegisterSchemaServiceServer(srv.Gsrv, &s.GServer) srv.Start() return s } +// SetTimeNowFunc registers f as a function to +// be used instead of time.Now for this server. +func (s *Server) SetTimeNowFunc(f func() time.Time) { + s.GServer.timeNowFunc = f +} + // Publish behaves as if the Publish RPC was called with a message with the given // data and attrs. It returns the ID of the message. // The topic will be created if it doesn't exist. // // Publish panics if there is an error, which is appropriate for testing. func (s *Server) Publish(topic string, data []byte, attrs map[string]string) string { + return s.PublishOrdered(topic, data, attrs, "") +} + +// PublishOrdered behaves as if the Publish RPC was called with a message with the given +// data, attrs and ordering key. It returns the ID of the message. +// The topic will be created if it doesn't exist. +// +// PublishOrdered panics if there is an error, which is appropriate for testing. +func (s *Server) PublishOrdered(topic string, data []byte, attrs map[string]string, orderingKey string) string { const topicPattern = "projects/*/topics/*" ok, err := path.Match(topicPattern, topic) if err != nil { @@ -115,10 +178,10 @@ func (s *Server) Publish(topic string, data []byte, attrs map[string]string) str if !ok { panic(fmt.Sprintf("topic name must be of the form %q", topicPattern)) } - _, _ = s.GServer.CreateTopic(context.TODO(), &pb.Topic{Name: topic}) + s.GServer.CreateTopic(context.TODO(), &pb.Topic{Name: topic}) req := &pb.PublishRequest{ Topic: topic, - Messages: []*pb.PubsubMessage{{Data: data, Attributes: attrs}}, + Messages: []*pb.PubsubMessage{{Data: data, Attributes: attrs, OrderingKey: orderingKey}}, } res, err := s.GServer.Publish(context.TODO(), req) if err != nil { @@ -127,6 +190,35 @@ func (s *Server) Publish(topic string, data []byte, attrs map[string]string) str return res.MessageIds[0] } +// AddPublishResponse adds a new publish response to the channel used for +// responding to publish requests. +func (s *Server) AddPublishResponse(pbr *pb.PublishResponse, err error) { + pr := &publishResponse{} + if err != nil { + pr.err = err + } else { + pr.resp = pbr + } + s.GServer.publishResponses <- pr +} + +// SetAutoPublishResponse controls whether to automatically respond +// to messages published or to use user-added responses from the +// publishResponses channel. 
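+// Illustrative sequence (the message ID below is a placeholder):
+//
+//	srv.SetAutoPublishResponse(false)
+//	srv.AddPublishResponse(&pb.PublishResponse{MessageIds: []string{"m0"}}, nil)
+//	// The next Publish RPC is answered with the canned response above
+//	// instead of a generated one.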
+func (s *Server) SetAutoPublishResponse(autoPublishResponse bool) { + s.GServer.mu.Lock() + defer s.GServer.mu.Unlock() + s.GServer.autoPublishResponse = autoPublishResponse +} + +// ResetPublishResponses resets the buffered publishResponses channel +// with a new buffered channel with the given size. +func (s *Server) ResetPublishResponses(size int) { + s.GServer.mu.Lock() + defer s.GServer.mu.Unlock() + s.GServer.publishResponses = make(chan *publishResponse, size) +} + // SetStreamTimeout sets the amount of time a stream will be active before it shuts // itself down. This mimics the real service's behavior of closing streams after 30 // minutes. If SetStreamTimeout is never called or is passed zero, streams never shut @@ -143,14 +235,15 @@ type Message struct { Data []byte Attributes map[string]string PublishTime time.Time - Deliveries int // number of times delivery of the message was attempted - Acks int // number of acks received from clients + Deliveries int // number of times delivery of the message was attempted + Acks int // number of acks received from clients + Modacks []Modack // modacks received by server for this message + OrderingKey string // protected by server mutex deliveries int acks int - Modacks []Modack // modacks received by server for this message - + modacks []Modack } // Modack represents a modack sent to the server. @@ -169,6 +262,7 @@ func (s *Server) Messages() []*Message { for _, m := range s.GServer.msgs { m.Deliveries = m.deliveries m.Acks = m.acks + m.Modacks = append([]Modack(nil), m.modacks...) msgs = append(msgs, m) } return msgs @@ -184,6 +278,7 @@ func (s *Server) Message(id string) *Message { if m != nil { m.Deliveries = m.deliveries m.Acks = m.acks + m.Modacks = append([]Modack(nil), m.modacks...) } return m } @@ -217,9 +312,16 @@ func (s *GServer) CreateTopic(_ context.Context, t *pb.Topic) (*pb.Topic, error) s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(t, "CreateTopic", &pb.Topic{}); handled || err != nil { + return ret.(*pb.Topic), err + } + if s.topics[t.Name] != nil { return nil, status.Errorf(codes.AlreadyExists, "topic %q", t.Name) } + if err := checkMRD(t.MessageRetentionDuration); err != nil { + return nil, err + } top := newTopic(t) s.topics[t.Name] = top return top.proto, nil @@ -229,6 +331,10 @@ func (s *GServer) GetTopic(_ context.Context, req *pb.GetTopicRequest) (*pb.Topi s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "GetTopic", &pb.Topic{}); handled || err != nil { + return ret.(*pb.Topic), err + } + if t := s.topics[req.Topic]; t != nil { return t.proto, nil } @@ -239,6 +345,10 @@ func (s *GServer) UpdateTopic(_ context.Context, req *pb.UpdateTopicRequest) (*p s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "UpdateTopic", &pb.Topic{}); handled || err != nil { + return ret.(*pb.Topic), err + } + t := s.topics[req.Topic.Name] if t == nil { return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic.Name) @@ -249,6 +359,11 @@ func (s *GServer) UpdateTopic(_ context.Context, req *pb.UpdateTopicRequest) (*p t.proto.Labels = req.Topic.Labels case "message_storage_policy": t.proto.MessageStoragePolicy = req.Topic.MessageStoragePolicy + case "message_retention_duration": + if err := checkMRD(req.Topic.MessageRetentionDuration); err != nil { + return nil, err + } + t.proto.MessageRetentionDuration = req.Topic.MessageRetentionDuration default: return nil, status.Errorf(codes.InvalidArgument, "unknown field name %q", path) } @@ -260,6 +375,10 @@ func (s 
*GServer) ListTopics(_ context.Context, req *pb.ListTopicsRequest) (*pb. s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "ListTopics", &pb.ListTopicsResponse{}); handled || err != nil { + return ret.(*pb.ListTopicsResponse), err + } + var names []string for n := range s.topics { if strings.HasPrefix(n, req.Project) { @@ -282,6 +401,10 @@ func (s *GServer) ListTopicSubscriptions(_ context.Context, req *pb.ListTopicSub s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "ListTopicSubscriptions", &pb.ListTopicSubscriptionsResponse{}); handled || err != nil { + return ret.(*pb.ListTopicSubscriptionsResponse), err + } + var names []string for name, sub := range s.subs { if sub.topic.proto.Name == req.Topic { @@ -303,10 +426,22 @@ func (s *GServer) DeleteTopic(_ context.Context, req *pb.DeleteTopicRequest) (*e s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "DeleteTopic", &emptypb.Empty{}); handled || err != nil { + return ret.(*emptypb.Empty), err + } + t := s.topics[req.Topic] if t == nil { return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) } + for _, sub := range s.subs { + if sub.deadLetterTopic == nil { + continue + } + if req.Topic == sub.deadLetterTopic.proto.Name { + return nil, status.Errorf(codes.FailedPrecondition, "topic %q used as deadLetter for %s", req.Topic, sub.proto.Name) + } + } t.stop() delete(s.topics, req.Topic) return &emptypb.Empty{}, nil @@ -316,6 +451,10 @@ func (s *GServer) CreateSubscription(_ context.Context, ps *pb.Subscription) (*p s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(ps, "CreateSubscription", &pb.Subscription{}); handled || err != nil { + return ret.(*pb.Subscription), err + } + if ps.Name == "" { return nil, status.Errorf(codes.InvalidArgument, "missing name") } @@ -341,8 +480,22 @@ func (s *GServer) CreateSubscription(_ context.Context, ps *pb.Subscription) (*p if ps.PushConfig == nil { ps.PushConfig = &pb.PushConfig{} } + if ps.BigqueryConfig == nil { + ps.BigqueryConfig = &pb.BigQueryConfig{} + } else if ps.BigqueryConfig.Table != "" { + ps.BigqueryConfig.State = pb.BigQueryConfig_ACTIVE + } + ps.TopicMessageRetentionDuration = top.proto.MessageRetentionDuration + var deadLetterTopic *topic + if ps.DeadLetterPolicy != nil { + dlTopic, ok := s.topics[ps.DeadLetterPolicy.DeadLetterTopic] + if !ok { + return nil, status.Errorf(codes.NotFound, "deadLetter topic %q", ps.DeadLetterPolicy.DeadLetterTopic) + } + deadLetterTopic = dlTopic + } - sub := newSubscription(top, &s.mu, ps) + sub := newSubscription(top, &s.mu, s.timeNowFunc, deadLetterTopic, ps) top.subs[ps.Name] = sub s.subs[ps.Name] = sub sub.start(&s.wg) @@ -355,8 +508,9 @@ var minAckDeadlineSecs int32 // SetMinAckDeadline changes the minack deadline to n. Must be // greater than or equal to 1 second. Remember to reset this value // to the default after your test changes it. 
Example usage: -// pstest.SetMinAckDeadlineSecs(1) -// defer pstest.ResetMinAckDeadlineSecs() +// +// pstest.SetMinAckDeadlineSecs(1) +// defer pstest.ResetMinAckDeadlineSecs() func SetMinAckDeadline(n time.Duration) { if n < time.Second { panic("SetMinAckDeadline expects a value greater than 1 second") @@ -383,11 +537,14 @@ const ( maxMessageRetentionDuration = 168 * time.Hour ) -var defaultMessageRetentionDuration = ptypes.DurationProto(maxMessageRetentionDuration) +var defaultMessageRetentionDuration = durpb.New(maxMessageRetentionDuration) func checkMRD(pmrd *durpb.Duration) error { - mrd, err := ptypes.Duration(pmrd) - if err != nil || mrd < minMessageRetentionDuration || mrd > maxMessageRetentionDuration { + if pmrd == nil { + return nil + } + mrd := pmrd.AsDuration() + if mrd < minMessageRetentionDuration || mrd > maxMessageRetentionDuration { return status.Errorf(codes.InvalidArgument, "bad message_retention_duration %+v", pmrd) } return nil @@ -396,6 +553,11 @@ func checkMRD(pmrd *durpb.Duration) error { func (s *GServer) GetSubscription(_ context.Context, req *pb.GetSubscriptionRequest) (*pb.Subscription, error) { s.mu.Lock() defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "GetSubscription", &pb.Subscription{}); handled || err != nil { + return ret.(*pb.Subscription), err + } + sub, err := s.findSubscription(req.Subscription) if err != nil { return nil, err @@ -409,6 +571,11 @@ func (s *GServer) UpdateSubscription(_ context.Context, req *pb.UpdateSubscripti } s.mu.Lock() defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "UpdateSubscription", &pb.Subscription{}); handled || err != nil { + return ret.(*pb.Subscription), err + } + sub, err := s.findSubscription(req.Subscription.Name) if err != nil { return nil, err @@ -418,6 +585,12 @@ func (s *GServer) UpdateSubscription(_ context.Context, req *pb.UpdateSubscripti case "push_config": sub.proto.PushConfig = req.Subscription.PushConfig + case "bigquery_config": + sub.proto.BigqueryConfig = req.GetSubscription().GetBigqueryConfig() + if sub.proto.GetBigqueryConfig().GetTable() != "" { + sub.proto.GetBigqueryConfig().State = pb.BigQueryConfig_ACTIVE + } + case "ack_deadline_seconds": a := req.Subscription.AckDeadlineSeconds if err := checkAckDeadline(a); err != nil { @@ -440,6 +613,28 @@ func (s *GServer) UpdateSubscription(_ context.Context, req *pb.UpdateSubscripti case "expiration_policy": sub.proto.ExpirationPolicy = req.Subscription.ExpirationPolicy + case "dead_letter_policy": + sub.proto.DeadLetterPolicy = req.Subscription.DeadLetterPolicy + if sub.proto.DeadLetterPolicy != nil { + dlTopic, ok := s.topics[sub.proto.DeadLetterPolicy.DeadLetterTopic] + if !ok { + return nil, status.Errorf(codes.NotFound, "topic %q", sub.proto.DeadLetterPolicy.DeadLetterTopic) + } + sub.deadLetterTopic = dlTopic + } + + case "retry_policy": + sub.proto.RetryPolicy = req.Subscription.RetryPolicy + + case "filter": + sub.proto.Filter = req.Subscription.Filter + + case "enable_exactly_once_delivery": + sub.proto.EnableExactlyOnceDelivery = req.Subscription.EnableExactlyOnceDelivery + for _, st := range sub.streams { + st.enableExactlyOnceDelivery = req.Subscription.EnableExactlyOnceDelivery + } + default: return nil, status.Errorf(codes.InvalidArgument, "unknown field name %q", path) } @@ -451,6 +646,10 @@ func (s *GServer) ListSubscriptions(_ context.Context, req *pb.ListSubscriptions s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "ListSubscriptions", &pb.ListSubscriptionsResponse{}); 
handled || err != nil { + return ret.(*pb.ListSubscriptionsResponse), err + } + var names []string for name := range s.subs { if strings.HasPrefix(name, req.Project) { @@ -472,6 +671,11 @@ func (s *GServer) ListSubscriptions(_ context.Context, req *pb.ListSubscriptions func (s *GServer) DeleteSubscription(_ context.Context, req *pb.DeleteSubscriptionRequest) (*emptypb.Empty, error) { s.mu.Lock() defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "DeleteSubscription", &emptypb.Empty{}); handled || err != nil { + return ret.(*emptypb.Empty), err + } + sub, err := s.findSubscription(req.Subscription) if err != nil { return nil, err @@ -482,10 +686,30 @@ func (s *GServer) DeleteSubscription(_ context.Context, req *pb.DeleteSubscripti return &emptypb.Empty{}, nil } +func (s *GServer) DetachSubscription(_ context.Context, req *pb.DetachSubscriptionRequest) (*pb.DetachSubscriptionResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "DetachSubscription", &pb.DetachSubscriptionResponse{}); handled || err != nil { + return ret.(*pb.DetachSubscriptionResponse), err + } + + sub, err := s.findSubscription(req.Subscription) + if err != nil { + return nil, err + } + sub.topic.deleteSub(sub) + return &pb.DetachSubscriptionResponse{}, nil +} + func (s *GServer) Publish(_ context.Context, req *pb.PublishRequest) (*pb.PublishResponse, error) { s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "Publish", &pb.PublishResponse{}); handled || err != nil { + return ret.(*pb.PublishResponse), err + } + if req.Topic == "" { return nil, status.Errorf(codes.InvalidArgument, "missing topic") } @@ -493,22 +717,29 @@ func (s *GServer) Publish(_ context.Context, req *pb.PublishRequest) (*pb.Publis if top == nil { return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) } + + if !s.autoPublishResponse { + r := <-s.publishResponses + if r.err != nil { + return nil, r.err + } + return r.resp, nil + } + var ids []string for _, pm := range req.Messages { id := fmt.Sprintf("m%d", s.nextID) s.nextID++ pm.MessageId = id - pubTime := timeNow() - tsPubTime, err := ptypes.TimestampProto(pubTime) - if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) - } + pubTime := s.timeNowFunc() + tsPubTime := timestamppb.New(pubTime) pm.PublishTime = tsPubTime m := &Message{ ID: id, Data: pm.Data, Attributes: pm.Attributes, PublishTime: pubTime, + OrderingKey: pm.OrderingKey, } top.publish(pm, m) ids = append(ids, id) @@ -533,7 +764,6 @@ func newTopic(pt *pb.Topic) *topic { func (t *topic) stop() { for _, sub := range t.subs { sub.proto.Topic = "_deleted-topic_" - sub.stop() } } @@ -557,27 +787,32 @@ func (t *topic) publish(pm *pb.PubsubMessage, m *Message) { } type subscription struct { - topic *topic - mu *sync.Mutex // the server mutex, here for convenience - proto *pb.Subscription - ackTimeout time.Duration - msgs map[string]*message // unacked messages by message ID - streams []*stream - done chan struct{} + topic *topic + deadLetterTopic *topic + mu *sync.Mutex // the server mutex, here for convenience + proto *pb.Subscription + ackTimeout time.Duration + msgs map[string]*message // unacked messages by message ID + streams []*stream + done chan struct{} + timeNowFunc func() time.Time } -func newSubscription(t *topic, mu *sync.Mutex, ps *pb.Subscription) *subscription { +func newSubscription(t *topic, mu *sync.Mutex, timeNowFunc func() time.Time, deadLetterTopic *topic, ps *pb.Subscription) *subscription { at := 
time.Duration(ps.AckDeadlineSeconds) * time.Second if at == 0 { at = 10 * time.Second } + ps.State = pb.Subscription_ACTIVE return &subscription{ - topic: t, - mu: mu, - proto: ps, - ackTimeout: at, - msgs: map[string]*message{}, - done: make(chan struct{}), + topic: t, + deadLetterTopic: deadLetterTopic, + mu: mu, + proto: ps, + ackTimeout: at, + msgs: map[string]*message{}, + done: make(chan struct{}), + timeNowFunc: timeNowFunc, } } @@ -604,6 +839,10 @@ func (s *GServer) Acknowledge(_ context.Context, req *pb.AcknowledgeRequest) (*e s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "Acknowledge", &emptypb.Empty{}); handled || err != nil { + return ret.(*emptypb.Empty), err + } + sub, err := s.findSubscription(req.Subscription) if err != nil { return nil, err @@ -617,13 +856,18 @@ func (s *GServer) Acknowledge(_ context.Context, req *pb.AcknowledgeRequest) (*e func (s *GServer) ModifyAckDeadline(_ context.Context, req *pb.ModifyAckDeadlineRequest) (*emptypb.Empty, error) { s.mu.Lock() defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "ModifyAckDeadline", &emptypb.Empty{}); handled || err != nil { + return ret.(*emptypb.Empty), err + } + sub, err := s.findSubscription(req.Subscription) if err != nil { return nil, err } now := time.Now() for _, id := range req.AckIds { - s.msgsByID[id].Modacks = append(s.msgsByID[id].Modacks, Modack{AckID: id, AckDeadline: req.AckDeadlineSeconds, ReceivedAt: now}) + s.msgsByID[id].modacks = append(s.msgsByID[id].modacks, Modack{AckID: id, AckDeadline: req.AckDeadlineSeconds, ReceivedAt: now}) } dur := secsToDur(req.AckDeadlineSeconds) for _, id := range req.AckIds { @@ -634,6 +878,12 @@ func (s *GServer) ModifyAckDeadline(_ context.Context, req *pb.ModifyAckDeadline func (s *GServer) Pull(ctx context.Context, req *pb.PullRequest) (*pb.PullResponse, error) { s.mu.Lock() + + if handled, ret, err := s.runReactor(req, "Pull", &pb.PullResponse{}); handled || err != nil { + s.mu.Unlock() + return ret.(*pb.PullResponse), err + } + sub, err := s.findSubscription(req.Subscription) if err != nil { s.mu.Unlock() @@ -683,6 +933,7 @@ func (s *GServer) StreamingPull(sps pb.Subscriber_StreamingPullServer) error { } // Create a new stream to handle the pull. st := sub.newStream(sps, s.streamTimeout) + st.ackTimeout = time.Duration(req.StreamAckDeadlineSeconds) * time.Second err = st.pull(&s.wg) sub.deleteStream(st) return err @@ -696,11 +947,7 @@ func (s *GServer) Seek(ctx context.Context, req *pb.SeekRequest) (*pb.SeekRespon case nil: return nil, status.Errorf(codes.InvalidArgument, "missing Seek target type") case *pb.SeekRequest_Time: - var err error - target, err = ptypes.Timestamp(v.Time) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "bad Time target: %v", err) - } + target = v.Time.AsTime() default: return nil, status.Errorf(codes.Unimplemented, "unhandled Seek target type %T", v) } @@ -709,6 +956,11 @@ func (s *GServer) Seek(ctx context.Context, req *pb.SeekRequest) (*pb.SeekRespon // because the messages don't have any other synchronization. s.mu.Lock() defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "Seek", &pb.SeekResponse{}); handled || err != nil { + return ret.(*pb.SeekResponse), err + } + sub, err := s.findSubscription(req.Subscription) if err != nil { return nil, err @@ -756,13 +1008,21 @@ func (s *GServer) findSubscription(name string) (*subscription, error) { // Must be called with the lock held. 
func (s *subscription) pull(max int) []*pb.ReceivedMessage { - now := timeNow() + now := s.timeNowFunc() s.maintainMessages(now) var msgs []*pb.ReceivedMessage - for _, m := range s.msgs { + for id, m := range filterMsgs(s.msgs, s.proto.EnableMessageOrdering) { if m.outstanding() { continue } + if s.deadLetterCandidate(m) { + s.ack(id) + s.publishToDeadLetter(m) + continue + } + if s.proto.DeadLetterPolicy != nil { + m.proto.DeliveryAttempt = int32(*m.deliveries) + } (*m.deliveries)++ m.ackDeadline = now.Add(s.ackTimeout) msgs = append(msgs, m.proto) @@ -773,18 +1033,49 @@ func (s *subscription) pull(max int) []*pb.ReceivedMessage { return msgs } +func filterMsgs(msgs map[string]*message, enableMessageOrdering bool) map[string]*message { + if !enableMessageOrdering { + return msgs + } + result := make(map[string]*message) + + type msg struct { + id string + m *message + } + orderingKeyMap := make(map[string]msg) + for id, m := range msgs { + orderingKey := m.proto.Message.OrderingKey + if orderingKey == "" { + orderingKey = id + } + if val, ok := orderingKeyMap[orderingKey]; !ok || m.proto.Message.PublishTime.AsTime().Before(val.m.proto.Message.PublishTime.AsTime()) { + orderingKeyMap[orderingKey] = msg{m: m, id: id} + } + } + for _, val := range orderingKeyMap { + result[val.id] = val.m + } + return result +} + func (s *subscription) deliver() { s.mu.Lock() defer s.mu.Unlock() - now := timeNow() + now := s.timeNowFunc() s.maintainMessages(now) // Try to deliver each remaining message. curIndex := 0 - for _, m := range s.msgs { + for id, m := range filterMsgs(s.msgs, s.proto.EnableMessageOrdering) { if m.outstanding() { continue } + if s.deadLetterCandidate(m) { + s.ack(id) + s.publishToDeadLetter(m) + continue + } // If the message was never delivered before, start with the stream at // curIndex. If it was delivered before, start with the stream after the one // that owned it. @@ -843,12 +1134,10 @@ func (s *subscription) maintainMessages(now time.Time) { if m.outstanding() && now.After(m.ackDeadline) { m.makeAvailable() } - pubTime, err := ptypes.Timestamp(m.proto.Message.PublishTime) - if err != nil { - panic(err) - } + pubTime := m.proto.Message.PublishTime.AsTime() // Remove messages that have been undelivered for a long time. 
if !m.outstanding() && now.Sub(pubTime) > retentionDuration { + s.publishToDeadLetter(m) delete(s.msgs, id) } } @@ -856,12 +1145,14 @@ func (s *subscription) maintainMessages(now time.Time) { func (s *subscription) newStream(gs pb.Subscriber_StreamingPullServer, timeout time.Duration) *stream { st := &stream{ - sub: s, - done: make(chan struct{}), - msgc: make(chan *pb.ReceivedMessage), - gstream: gs, - ackTimeout: s.ackTimeout, - timeout: timeout, + sub: s, + done: make(chan struct{}), + msgc: make(chan *pb.ReceivedMessage), + gstream: gs, + ackTimeout: s.ackTimeout, + timeout: timeout, + enableExactlyOnceDelivery: s.proto.EnableExactlyOnceDelivery, + enableOrdering: s.proto.EnableMessageOrdering, } s.mu.Lock() s.streams = append(s.streams, st) @@ -882,6 +1173,33 @@ func (s *subscription) deleteStream(st *stream) { s.streams = deleteStreamAt(s.streams, i) } } + +func (s *subscription) deadLetterCandidate(m *message) bool { + if s.proto.DeadLetterPolicy == nil { + return false + } + if m.retriesDone(s.proto.DeadLetterPolicy.MaxDeliveryAttempts) { + return true + } + return false +} + +func (s *subscription) publishToDeadLetter(m *message) { + acks := 0 + if m.acks != nil { + acks = *m.acks + } + deliveries := 0 + if m.deliveries != nil { + deliveries = *m.deliveries + } + s.deadLetterTopic.publish(m.proto.Message, &Message{ + PublishTime: m.publishTime, + Acks: acks, + Deliveries: deliveries, + }) +} + func deleteStreamAt(s []*stream, i int) []*stream { // Preserve order for round-robin delivery. return append(s[:i], s[i+1:]...) @@ -901,17 +1219,24 @@ func (m *message) outstanding() bool { return !m.ackDeadline.IsZero() } +// A message is outstanding if it is owned by some stream. +func (m *message) retriesDone(maxRetries int32) bool { + return m.deliveries != nil && int32(*m.deliveries) >= maxRetries +} + func (m *message) makeAvailable() { m.ackDeadline = time.Time{} } type stream struct { - sub *subscription - done chan struct{} // closed when the stream is finished - msgc chan *pb.ReceivedMessage - gstream pb.Subscriber_StreamingPullServer - ackTimeout time.Duration - timeout time.Duration + sub *subscription + done chan struct{} // closed when the stream is finished + msgc chan *pb.ReceivedMessage + gstream pb.Subscriber_StreamingPullServer + ackTimeout time.Duration + timeout time.Duration + enableExactlyOnceDelivery bool + enableOrdering bool } // pull manages the StreamingPull interaction for the life of the stream. @@ -949,7 +1274,13 @@ func (st *stream) sendLoop() error { case <-st.done: return nil case rm := <-st.msgc: - res := &pb.StreamingPullResponse{ReceivedMessages: []*pb.ReceivedMessage{rm}} + res := &pb.StreamingPullResponse{ + ReceivedMessages: []*pb.ReceivedMessage{rm}, + SubscriptionProperties: &pb.StreamingPullResponse_SubscriptionProperties{ + ExactlyOnceDeliveryEnabled: st.enableExactlyOnceDelivery, + MessageOrderingEnabled: st.enableOrdering, + }, + } if err := st.gstream.Send(res); err != nil { return err } @@ -1001,10 +1332,162 @@ func (s *subscription) modifyAckDeadline(id string, d time.Duration) { if d == 0 { // nack m.makeAvailable() } else { // extend the deadline by d - m.ackDeadline = timeNow().Add(d) + m.ackDeadline = s.timeNowFunc().Add(d) } } func secsToDur(secs int32) time.Duration { return time.Duration(secs) * time.Second } + +// runReactor looks up the reactors for a function, then launches them until handled=true +// or err is returned. If the reactor returns nil, the function returns defaultObj instead. 
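+// Reactors are registered per RPC name via ServerReactorOption. A custom
+// reactor is any value with a React method; for illustration (the type and
+// error message below are placeholders, not part of the package):
+//
+//	type failPublish struct{}
+//
+//	func (failPublish) React(_ interface{}) (bool, interface{}, error) {
+//		return true, nil, status.Error(codes.Unavailable, "injected outage")
+//	}
+//
+//	srv := pstest.NewServer(pstest.ServerReactorOption{
+//		FuncName: "Publish",
+//		Reactor:  failPublish{},
+//	})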
+func (s *GServer) runReactor(req interface{}, funcName string, defaultObj interface{}) (bool, interface{}, error) { + if val, ok := s.reactorOptions[funcName]; ok { + for _, reactor := range val { + handled, ret, err := reactor.React(req) + // If handled=true, that means the reactor has successfully reacted to the request, + // so use the output directly. If err occurs, that means the request is invalidated + // by the reactor somehow. + if handled || err != nil { + if ret == nil { + ret = defaultObj + } + return true, ret, err + } + } + } + return false, nil, nil +} + +// errorInjectionReactor is a reactor to inject an error message with status code. +type errorInjectionReactor struct { + code codes.Code + msg string +} + +// React simply returns an error with defined error message and status code. +func (e *errorInjectionReactor) React(_ interface{}) (handled bool, ret interface{}, err error) { + return true, nil, status.Errorf(e.code, e.msg) +} + +// WithErrorInjection creates a ServerReactorOption that injects error with defined status code and +// message for a certain function. +func WithErrorInjection(funcName string, code codes.Code, msg string) ServerReactorOption { + return ServerReactorOption{ + FuncName: funcName, + Reactor: &errorInjectionReactor{code: code, msg: msg}, + } +} + +func (s *GServer) CreateSchema(_ context.Context, req *pb.CreateSchemaRequest) (*pb.Schema, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "CreateSchema", &pb.Schema{}); handled || err != nil { + return ret.(*pb.Schema), err + } + + name := fmt.Sprintf("%s/schemas/%s", req.Parent, req.SchemaId) + sc := &pb.Schema{ + Name: name, + Type: req.Schema.Type, + Definition: req.Schema.Definition, + } + s.schemas[name] = sc + + return sc, nil +} + +func (s *GServer) GetSchema(_ context.Context, req *pb.GetSchemaRequest) (*pb.Schema, error) { + + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "GetSchema", &pb.Schema{}); handled || err != nil { + return ret.(*pb.Schema), err + } + + sc, ok := s.schemas[req.Name] + if !ok { + return nil, status.Errorf(codes.NotFound, "schema(%q) not found", req.Name) + } + return sc, nil +} + +func (s *GServer) ListSchemas(_ context.Context, req *pb.ListSchemasRequest) (*pb.ListSchemasResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "ListSchemas", &pb.ListSchemasResponse{}); handled || err != nil { + return ret.(*pb.ListSchemasResponse), err + } + ss := make([]*pb.Schema, 0) + for _, sc := range s.schemas { + ss = append(ss, sc) + } + return &pb.ListSchemasResponse{ + Schemas: ss, + }, nil +} + +func (s *GServer) DeleteSchema(_ context.Context, req *pb.DeleteSchemaRequest) (*emptypb.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "DeleteSchema", &emptypb.Empty{}); handled || err != nil { + return ret.(*emptypb.Empty), err + } + + schema := s.schemas[req.Name] + if schema == nil { + return nil, status.Errorf(codes.NotFound, "schema %q", req.Name) + } + + delete(s.schemas, req.Name) + return &emptypb.Empty{}, nil +} + +// ValidateSchema mocks the ValidateSchema call but only checks that the schema definition is not empty. 
+func (s *GServer) ValidateSchema(_ context.Context, req *pb.ValidateSchemaRequest) (*pb.ValidateSchemaResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "ValidateSchema", &pb.ValidateSchemaResponse{}); handled || err != nil { + return ret.(*pb.ValidateSchemaResponse), err + } + + if req.Schema.Definition == "" { + return nil, status.Error(codes.InvalidArgument, "schema definition cannot be empty") + } + return &pb.ValidateSchemaResponse{}, nil +} + +// ValidateMessage mocks the ValidateMessage call but only checks that the schema definition to validate the +// message against is not empty. +func (s *GServer) ValidateMessage(_ context.Context, req *pb.ValidateMessageRequest) (*pb.ValidateMessageResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "ValidateMessage", &pb.ValidateMessageResponse{}); handled || err != nil { + return ret.(*pb.ValidateMessageResponse), err + } + + spec := req.GetSchemaSpec() + if valReq, ok := spec.(*pb.ValidateMessageRequest_Name); ok { + sc, ok := s.schemas[valReq.Name] + if !ok { + return nil, status.Errorf(codes.NotFound, "schema(%q) not found", valReq.Name) + } + if sc.Definition == "" { + return nil, status.Error(codes.InvalidArgument, "schema definition cannot be empty") + } + } + if valReq, ok := spec.(*pb.ValidateMessageRequest_Schema); ok { + if valReq.Schema.Definition == "" { + return nil, status.Error(codes.InvalidArgument, "schema definition cannot be empty") + } + } + + return &pb.ValidateMessageResponse{}, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/pubsub.go b/vendor/cloud.google.com/go/pubsub/pubsub.go index a14f6699784b9..0b44ea5583e4f 100644 --- a/vendor/cloud.google.com/go/pubsub/pubsub.go +++ b/vendor/cloud.google.com/go/pubsub/pubsub.go @@ -18,11 +18,14 @@ import ( "context" "fmt" "os" + "reflect" "runtime" + "strings" "time" - "cloud.google.com/go/internal/version" vkit "cloud.google.com/go/pubsub/apiv1" + "cloud.google.com/go/pubsub/internal" + gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/option" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" @@ -36,8 +39,6 @@ const ( // ScopeCloudPlatform grants permissions to view and manage your data // across Google Cloud Platform services. ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" - - maxAckDeadline = 10 * time.Minute ) // Client is a Google Pub/Sub client scoped to a single project. @@ -50,17 +51,85 @@ type Client struct { subc *vkit.SubscriberClient } -// NewClient creates a new PubSub client. +// ClientConfig has configurations for the client. +type ClientConfig struct { + PublisherCallOptions *vkit.PublisherCallOptions + SubscriberCallOptions *vkit.SubscriberCallOptions +} + +// mergePublisherCallOptions merges two PublisherCallOptions into one and the first argument has +// a lower order of precedence than the second one. If either is nil, return the other. 
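+// These merged options come from ClientConfig; a sketch of how a caller might
+// supply them (the Publish field name follows the generated
+// vkit.PublisherCallOptions struct, and gax is github.com/googleapis/gax-go/v2):
+//
+//	cfg := &pubsub.ClientConfig{
+//		PublisherCallOptions: &vkit.PublisherCallOptions{
+//			Publish: []gax.CallOption{
+//				gax.WithRetry(func() gax.Retryer {
+//					return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//						Initial:    100 * time.Millisecond,
+//						Max:        time.Second,
+//						Multiplier: 2,
+//					})
+//				}),
+//			},
+//		},
+//	}
+//	client, err := pubsub.NewClientWithConfig(ctx, "my-project", cfg)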
+func mergePublisherCallOptions(a *vkit.PublisherCallOptions, b *vkit.PublisherCallOptions) *vkit.PublisherCallOptions { + if a == nil { + return b + } + if b == nil { + return a + } + res := &vkit.PublisherCallOptions{} + resVal := reflect.ValueOf(res).Elem() + aVal := reflect.ValueOf(a).Elem() + bVal := reflect.ValueOf(b).Elem() + + t := aVal.Type() + + for i := 0; i < aVal.NumField(); i++ { + fieldName := t.Field(i).Name + + aFieldVal := aVal.Field(i).Interface().([]gax.CallOption) + bFieldVal := bVal.Field(i).Interface().([]gax.CallOption) + + merged := append(aFieldVal, bFieldVal...) + resVal.FieldByName(fieldName).Set(reflect.ValueOf(merged)) + } + return res +} + +// mergeSubscribercallOptions merges two SubscriberCallOptions into one and the first argument has +// a lower order of precedence than the second one. If either is nil, the other is used. +func mergeSubscriberCallOptions(a *vkit.SubscriberCallOptions, b *vkit.SubscriberCallOptions) *vkit.SubscriberCallOptions { + if a == nil { + return b + } + if b == nil { + return a + } + res := &vkit.SubscriberCallOptions{} + resVal := reflect.ValueOf(res).Elem() + aVal := reflect.ValueOf(a).Elem() + bVal := reflect.ValueOf(b).Elem() + + t := aVal.Type() + + for i := 0; i < aVal.NumField(); i++ { + fieldName := t.Field(i).Name + + aFieldVal := aVal.Field(i).Interface().([]gax.CallOption) + bFieldVal := bVal.Field(i).Interface().([]gax.CallOption) + + merged := append(aFieldVal, bFieldVal...) + resVal.FieldByName(fieldName).Set(reflect.ValueOf(merged)) + } + return res +} + +// NewClient creates a new PubSub client. It uses a default configuration. func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (c *Client, err error) { + return NewClientWithConfig(ctx, projectID, nil, opts...) +} + +// NewClientWithConfig creates a new PubSub client. +func NewClientWithConfig(ctx context.Context, projectID string, config *ClientConfig, opts ...option.ClientOption) (c *Client, err error) { var o []option.ClientOption // Environment variables for gcloud emulator: // https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/ if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" { conn, err := grpc.Dial(addr, grpc.WithInsecure()) if err != nil { - return nil, fmt.Errorf("grpc.Dial: %v", err) + return nil, fmt.Errorf("grpc.Dial: %w", err) } o = []option.ClientOption{option.WithGRPCConn(conn)} + o = append(o, option.WithTelemetryDisabled()) } else { numConns := runtime.GOMAXPROCS(0) if numConns > 4 { @@ -73,21 +142,21 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio Time: 5 * time.Minute, })), } - o = append(o, openCensusOptions()...) } o = append(o, opts...) pubc, err := vkit.NewPublisherClient(ctx, o...) if err != nil { - return nil, fmt.Errorf("pubsub: %v", err) + return nil, fmt.Errorf("pubsub(publisher): %w", err) } - subc, err := vkit.NewSubscriberClient(ctx, option.WithGRPCConn(pubc.Connection())) + subc, err := vkit.NewSubscriberClient(ctx, o...) if err != nil { - // Should never happen, since we are passing in the connection. - // If it does, we cannot close, because the user may have passed in their - // own connection originally. 
- return nil, fmt.Errorf("pubsub: %v", err) + return nil, fmt.Errorf("pubsub(subscriber): %w", err) + } + if config != nil { + pubc.CallOptions = mergePublisherCallOptions(pubc.CallOptions, config.PublisherCallOptions) + subc.CallOptions = mergeSubscriberCallOptions(subc.CallOptions, config.SubscriberCallOptions) } - pubc.SetGoogleClientInfo("gccl", version.Repo) + pubc.SetGoogleClientInfo("gccl", internal.Version) return &Client{ projectID: projectID, pubc: pubc, @@ -101,10 +170,22 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio // If the client is available for the lifetime of the program, then Close need not be // called at exit. func (c *Client) Close() error { - // Return the first error, because the first call closes the connection. - err := c.pubc.Close() - _ = c.subc.Close() - return err + pubErr := c.pubc.Close() + subErr := c.subc.Close() + if pubErr != nil { + return fmt.Errorf("pubsub publisher closing error: %w", pubErr) + } + if subErr != nil { + // Suppress client connection closing errors. This will only happen + // when using the client in conjunction with the Pub/Sub emulator + // or fake (pstest). Closing both clients separately will never + // return this error against the live Pub/Sub service. + if strings.Contains(subErr.Error(), "the client connection is closing") { + return nil + } + return fmt.Errorf("pubsub subscriber closing error: %w", subErr) + } + return nil } func (c *Client) fullyQualifiedProjectName() string { diff --git a/vendor/cloud.google.com/go/pubsub/pullstream.go b/vendor/cloud.google.com/go/pubsub/pullstream.go index dd0e6c6805bea..2137cdb9f6eb3 100644 --- a/vendor/cloud.google.com/go/pubsub/pullstream.go +++ b/vendor/cloud.google.com/go/pubsub/pullstream.go @@ -28,8 +28,9 @@ import ( // A pullStream supports the methods of a StreamingPullClient, but re-opens // the stream on a retryable error. type pullStream struct { - ctx context.Context - open func() (pb.Subscriber_StreamingPullClient, error) + ctx context.Context + open func() (pb.Subscriber_StreamingPullClient, error) + cancel context.CancelFunc mu sync.Mutex spc *pb.Subscriber_StreamingPullClient @@ -39,18 +40,27 @@ type pullStream struct { // for testing type streamingPullFunc func(context.Context, ...gax.CallOption) (pb.Subscriber_StreamingPullClient, error) -func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName string) *pullStream { +func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName string, maxOutstandingMessages, maxOutstandingBytes int, maxDurationPerLeaseExtension time.Duration) *pullStream { ctx = withSubscriptionKey(ctx, subName) + ctx, cancel := context.WithCancel(ctx) return &pullStream{ - ctx: ctx, + ctx: ctx, + cancel: cancel, open: func() (pb.Subscriber_StreamingPullClient, error) { spc, err := streamingPull(ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) if err == nil { recordStat(ctx, StreamRequestCount, 1) + streamAckDeadline := int32(maxDurationPerLeaseExtension / time.Second) + // By default, maxDurationPerLeaseExtension, aka MaxExtensionPeriod, is disabled, + // so in these cases, use a healthy default of 60 seconds. + if streamAckDeadline <= 0 { + streamAckDeadline = 60 + } err = spc.Send(&pb.StreamingPullRequest{ - Subscription: subName, - // We modack messages when we receive them, so this value doesn't matter too much. 
- StreamAckDeadlineSeconds: 60, + Subscription: subName, + StreamAckDeadlineSeconds: streamAckDeadline, + MaxOutstandingMessages: int64(maxOutstandingMessages), + MaxOutstandingBytes: int64(maxOutstandingBytes), }) } if err != nil { diff --git a/vendor/cloud.google.com/go/pubsub/schema.go b/vendor/cloud.google.com/go/pubsub/schema.go new file mode 100644 index 0000000000000..1d3ff03af6bcd --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/schema.go @@ -0,0 +1,263 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "context" + "fmt" + + "google.golang.org/api/option" + + vkit "cloud.google.com/go/pubsub/apiv1" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +// SchemaClient is a Pub/Sub schema client scoped to a single project. +type SchemaClient struct { + sc *vkit.SchemaClient + projectID string +} + +// Close closes the schema client and frees up resources. +func (s *SchemaClient) Close() error { + return s.sc.Close() +} + +// NewSchemaClient creates a new Pub/Sub Schema client. +func NewSchemaClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*SchemaClient, error) { + sc, err := vkit.NewSchemaClient(ctx, opts...) + if err != nil { + return nil, err + } + return &SchemaClient{sc: sc, projectID: projectID}, nil +} + +// SchemaConfig is a reference to a PubSub schema. +type SchemaConfig struct { + // The name of the schema populated by the server. This field is read-only. + Name string + + // The type of the schema definition. + Type SchemaType + + // The definition of the schema. This should contain a string representing + // the full definition of the schema that is a valid schema definition of + // the type specified in `type`. + Definition string +} + +// SchemaType is the possible shcema definition types. +type SchemaType pb.Schema_Type + +const ( + // SchemaTypeUnspecified is the unused default value. + SchemaTypeUnspecified SchemaType = 0 + // SchemaProtocolBuffer is a protobuf schema definition. + SchemaProtocolBuffer SchemaType = 1 + // SchemaAvro is an Avro schema definition. + SchemaAvro SchemaType = 2 +) + +// SchemaView is a view of Schema object fields to be returned +// by GetSchema and ListSchemas. +type SchemaView pb.SchemaView + +const ( + // SchemaViewUnspecified is the default/unset value. + SchemaViewUnspecified SchemaView = 0 + // SchemaViewBasic includes the name and type of the schema, but not the definition. + SchemaViewBasic SchemaView = 1 + // SchemaViewFull includes all Schema object fields. + SchemaViewFull SchemaView = 2 +) + +// SchemaSettings are settings for validating messages +// published against a schema. 
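+// The Schema field refers to a schema resource created through SchemaClient;
+// a sketch of producing one (project and schema IDs and the Avro definition
+// string are placeholders):
+//
+//	sc, err := pubsub.NewSchemaClient(ctx, "my-project")
+//	if err != nil {
+//		// handle error
+//	}
+//	cfg, err := sc.CreateSchema(ctx, "my-schema", pubsub.SchemaConfig{
+//		Type:       pubsub.SchemaAvro,
+//		Definition: avroDefinition,
+//	})
+//	// cfg.Name has the form "projects/my-project/schemas/my-schema".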
+type SchemaSettings struct { + Schema string + Encoding SchemaEncoding +} + +func schemaSettingsToProto(schema *SchemaSettings) *pb.SchemaSettings { + if schema == nil { + return nil + } + return &pb.SchemaSettings{ + Schema: schema.Schema, + Encoding: pb.Encoding(schema.Encoding), + } +} + +func protoToSchemaSettings(pbs *pb.SchemaSettings) *SchemaSettings { + if pbs == nil { + return nil + } + return &SchemaSettings{ + Schema: pbs.Schema, + Encoding: SchemaEncoding(pbs.Encoding), + } +} + +// SchemaEncoding is the encoding expected for messages. +type SchemaEncoding pb.Encoding + +const ( + // EncodingUnspecified is the default unused value. + EncodingUnspecified SchemaEncoding = 0 + // EncodingJSON is the JSON encoding type for a message. + EncodingJSON SchemaEncoding = 1 + // EncodingBinary is the binary encoding type for a message. + // For some schema types, binary encoding may not be available. + EncodingBinary SchemaEncoding = 2 +) + +func (s *SchemaConfig) toProto() *pb.Schema { + pbs := &pb.Schema{ + Name: s.Name, + Type: pb.Schema_Type(s.Type), + Definition: s.Definition, + } + return pbs +} + +func protoToSchemaConfig(pbs *pb.Schema) *SchemaConfig { + return &SchemaConfig{ + Name: pbs.Name, + Type: SchemaType(pbs.Type), + Definition: pbs.Definition, + } +} + +// CreateSchema creates a new schema with the given schemaID +// and config. Schemas cannot be updated after creation. +func (c *SchemaClient) CreateSchema(ctx context.Context, schemaID string, s SchemaConfig) (*SchemaConfig, error) { + req := &pb.CreateSchemaRequest{ + Parent: fmt.Sprintf("projects/%s", c.projectID), + Schema: s.toProto(), + SchemaId: schemaID, + } + pbs, err := c.sc.CreateSchema(ctx, req) + if err != nil { + return nil, err + } + return protoToSchemaConfig(pbs), nil +} + +// Schema retrieves the configuration of a schema given a schemaID and a view. +func (c *SchemaClient) Schema(ctx context.Context, schemaID string, view SchemaView) (*SchemaConfig, error) { + schemaPath := fmt.Sprintf("projects/%s/schemas/%s", c.projectID, schemaID) + req := &pb.GetSchemaRequest{ + Name: schemaPath, + View: pb.SchemaView(view), + } + s, err := c.sc.GetSchema(ctx, req) + if err != nil { + return nil, err + } + return protoToSchemaConfig(s), nil +} + +// Schemas returns an iterator which returns all of the schemas for the client's project. +func (c *SchemaClient) Schemas(ctx context.Context, view SchemaView) *SchemaIterator { + return &SchemaIterator{ + it: c.sc.ListSchemas(ctx, &pb.ListSchemasRequest{ + Parent: fmt.Sprintf("projects/%s", c.projectID), + View: pb.SchemaView(view), + }), + } +} + +// SchemaIterator is a struct used to iterate over schemas. +type SchemaIterator struct { + it *vkit.SchemaIterator + err error +} + +// Next returns the next schema. If there are no more schemas, iterator.Done will be returned. +func (s *SchemaIterator) Next() (*SchemaConfig, error) { + if s.err != nil { + return nil, s.err + } + pbs, err := s.it.Next() + if err != nil { + return nil, err + } + return protoToSchemaConfig(pbs), nil +} + +// DeleteSchema deletes an existing schema given a schema ID. +func (s *SchemaClient) DeleteSchema(ctx context.Context, schemaID string) error { + schemaPath := fmt.Sprintf("projects/%s/schemas/%s", s.projectID, schemaID) + return s.sc.DeleteSchema(ctx, &pb.DeleteSchemaRequest{ + Name: schemaPath, + }) +} + +// ValidateSchemaResult is the response for the ValidateSchema method. +// Reserved for future use. 
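Illustration (not part of the diff): a rough sketch of using the SchemaClient introduced in this new file to create an Avro schema and validate a candidate message against it. The project and schema IDs and the Avro definition are placeholders; it assumes the same context and pubsub imports as the sketch above.

func createAndValidateSchema(ctx context.Context) error {
	sc, err := pubsub.NewSchemaClient(ctx, "example-project") // placeholder project ID
	if err != nil {
		return err
	}
	defer sc.Close()

	// A toy Avro record type; real definitions would come from your own schema files.
	def := `{"type":"record","name":"Event","fields":[{"name":"id","type":"string"}]}`
	cfg, err := sc.CreateSchema(ctx, "example-schema", pubsub.SchemaConfig{
		Type:       pubsub.SchemaAvro,
		Definition: def,
	})
	if err != nil {
		return err
	}
	// Check a candidate JSON-encoded message against the stored definition
	// before publishing it to a schema-bound topic.
	_, err = sc.ValidateMessageWithConfig(ctx, []byte(`{"id":"1"}`), pubsub.EncodingJSON, *cfg)
	return err
}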
+type ValidateSchemaResult struct{} + +// ValidateSchema validates a schema config and returns an error if invalid. +func (s *SchemaClient) ValidateSchema(ctx context.Context, schema SchemaConfig) (*ValidateSchemaResult, error) { + req := &pb.ValidateSchemaRequest{ + Parent: fmt.Sprintf("projects/%s", s.projectID), + Schema: schema.toProto(), + } + _, err := s.sc.ValidateSchema(ctx, req) + if err != nil { + return nil, err + } + return &ValidateSchemaResult{}, nil +} + +// ValidateMessageResult is the response for the ValidateMessage method. +// Reserved for future use. +type ValidateMessageResult struct{} + +// ValidateMessageWithConfig validates a message against an schema specified +// by a schema config. +func (s *SchemaClient) ValidateMessageWithConfig(ctx context.Context, msg []byte, encoding SchemaEncoding, config SchemaConfig) (*ValidateMessageResult, error) { + req := &pb.ValidateMessageRequest{ + Parent: fmt.Sprintf("projects/%s", s.projectID), + SchemaSpec: &pb.ValidateMessageRequest_Schema{ + Schema: config.toProto(), + }, + Message: msg, + Encoding: pb.Encoding(encoding), + } + _, err := s.sc.ValidateMessage(ctx, req) + if err != nil { + return nil, err + } + return &ValidateMessageResult{}, nil +} + +// ValidateMessageWithID validates a message against an schema specified +// by the schema ID of an existing schema. +func (s *SchemaClient) ValidateMessageWithID(ctx context.Context, msg []byte, encoding SchemaEncoding, schemaID string) (*ValidateMessageResult, error) { + req := &pb.ValidateMessageRequest{ + Parent: fmt.Sprintf("projects/%s", s.projectID), + SchemaSpec: &pb.ValidateMessageRequest_Name{ + Name: fmt.Sprintf("projects/%s/schemas/%s", s.projectID, schemaID), + }, + Message: msg, + Encoding: pb.Encoding(encoding), + } + _, err := s.sc.ValidateMessage(ctx, req) + if err != nil { + return nil, err + } + return &ValidateMessageResult{}, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/service.go b/vendor/cloud.google.com/go/pubsub/service.go index a22b9147fccc8..928a77d3b1c54 100644 --- a/vendor/cloud.google.com/go/pubsub/service.go +++ b/vendor/cloud.google.com/go/pubsub/service.go @@ -15,13 +15,11 @@ package pubsub import ( - "fmt" "math" "strings" "time" gax "github.com/googleapis/gax-go/v2" - pb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -37,18 +35,6 @@ const ( maxSendRecvBytes = 20 * 1024 * 1024 // 20M ) -func convertMessages(rms []*pb.ReceivedMessage) ([]*Message, error) { - msgs := make([]*Message, 0, len(rms)) - for i, m := range rms { - msg, err := toMessage(m) - if err != nil { - return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m) - } - msgs = append(msgs, msg) - } - return msgs, nil -} - func trunc32(i int64) int32 { if i > math.MaxInt32 { i = math.MaxInt32 @@ -61,7 +47,7 @@ type defaultRetryer struct { } // Logic originally from -// https://github.com/GoogleCloudPlatform/google-cloud-java/blob/master/google-cloud-clients/google-cloud-pubsub/src/main/java/com/google/cloud/pubsub/v1/StatusUtil.java +// https://github.com/googleapis/java-pubsub/blob/main/google-cloud-pubsub/src/main/java/com/google/cloud/pubsub/v1/StatusUtil.java func (r *defaultRetryer) Retry(err error) (pause time.Duration, shouldRetry bool) { s, ok := status.FromError(err) if !ok { // includes io.EOF, normal stream close, which causes us to reopen @@ -76,6 +62,13 @@ func (r *defaultRetryer) Retry(err error) (pause time.Duration, shouldRetry bool return 
r.bo.Pause(), true } return 0, false + case codes.Unknown: + // Retry GOAWAY, see https://github.com/googleapis/google-cloud-go/issues/4257. + isGoaway := strings.Contains(s.Message(), "received prior goaway: code: NO_ERROR") + if isGoaway { + return r.bo.Pause(), true + } + return 0, false default: return 0, false } @@ -98,3 +91,42 @@ func (r *streamingPullRetryer) Retry(err error) (pause time.Duration, shouldRetr return r.defaultRetryer.Retry(err) } } + +type publishRetryer struct { + defaultRetryer gax.Retryer +} + +func (r *publishRetryer) Retry(err error) (pause time.Duration, shouldRetry bool) { + s, ok := status.FromError(err) + if !ok { + return r.defaultRetryer.Retry(err) + } + if s.Code() == codes.Internal && strings.Contains(s.Message(), "string field contains invalid UTF-8") { + return 0, false + } + return r.defaultRetryer.Retry(err) +} + +var ( + exactlyOnceDeliveryTemporaryRetryErrors = map[codes.Code]struct{}{ + codes.DeadlineExceeded: {}, + codes.ResourceExhausted: {}, + codes.Aborted: {}, + codes.Internal: {}, + codes.Unavailable: {}, + } +) + +// contains checks if grpc code v is in t, a set of retryable error codes. +func contains(v codes.Code, t map[codes.Code]struct{}) bool { + _, ok := t[v] + return ok +} + +func newExactlyOnceBackoff() gax.Backoff { + return gax.Backoff{ + Initial: 1 * time.Second, + Max: 64 * time.Second, + Multiplier: 2, + } +} diff --git a/vendor/cloud.google.com/go/pubsub/snapshot.go b/vendor/cloud.google.com/go/pubsub/snapshot.go index c2a28d7819904..78797f36475d2 100644 --- a/vendor/cloud.google.com/go/pubsub/snapshot.go +++ b/vendor/cloud.google.com/go/pubsub/snapshot.go @@ -20,8 +20,9 @@ import ( "strings" "time" - "github.com/golang/protobuf/ptypes" pb "google.golang.org/genproto/googleapis/pubsub/v1" + fmpb "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/protobuf/types/known/timestamppb" ) // Snapshot is a reference to a PubSub snapshot. @@ -42,11 +43,30 @@ func (s *Snapshot) ID() string { return s.name[slash+1:] } +// SetLabels sets or replaces the labels on a given snapshot. +func (s *Snapshot) SetLabels(ctx context.Context, label map[string]string) (*SnapshotConfig, error) { + sc, err := s.c.subc.UpdateSnapshot(ctx, &pb.UpdateSnapshotRequest{ + Snapshot: &pb.Snapshot{ + Name: s.name, + Labels: label, + }, + UpdateMask: &fmpb.FieldMask{ + Paths: []string{"labels"}, + }, + }) + if err != nil { + return nil, err + } + return toSnapshotConfig(sc, s.c) +} + // SnapshotConfig contains the details of a Snapshot. type SnapshotConfig struct { *Snapshot Topic *Topic Expiration time.Time + // The set of labels for the snapshot. + Labels map[string]string } // Snapshot creates a reference to a snapshot. @@ -100,11 +120,8 @@ func (s *Snapshot) Delete(ctx context.Context) error { // creation time), only retained messages will be marked as unacknowledged, // and already-expunged messages will not be restored. func (s *Subscription) SeekToTime(ctx context.Context, t time.Time) error { - ts, err := ptypes.TimestampProto(t) - if err != nil { - return err - } - _, err = s.c.subc.Seek(ctx, &pb.SeekRequest{ + ts := timestamppb.New(t) + _, err := s.c.subc.Seek(ctx, &pb.SeekRequest{ Subscription: s.name, Target: &pb.SeekRequest_Time{Time: ts}, }) @@ -116,11 +133,12 @@ func (s *Subscription) SeekToTime(ctx context.Context, t time.Time) error { // If the name is empty string, a unique name is assigned. // // The created snapshot is guaranteed to retain: -// (a) The existing backlog on the subscription. 
More precisely, this is -// defined as the messages in the subscription's backlog that are -// unacknowledged when Snapshot returns without error. -// (b) Any messages published to the subscription's topic following -// Snapshot returning without error. +// +// (a) The existing backlog on the subscription. More precisely, this is +// defined as the messages in the subscription's backlog that are +// unacknowledged when Snapshot returns without error. +// (b) Any messages published to the subscription's topic following +// Snapshot returning without error. func (s *Subscription) CreateSnapshot(ctx context.Context, name string) (*SnapshotConfig, error) { if name != "" { name = fmt.Sprintf("projects/%s/snapshots/%s", strings.Split(s.name, "/")[1], name) @@ -148,13 +166,11 @@ func (s *Subscription) SeekToSnapshot(ctx context.Context, snap *Snapshot) error } func toSnapshotConfig(snap *pb.Snapshot, c *Client) (*SnapshotConfig, error) { - exp, err := ptypes.Timestamp(snap.ExpireTime) - if err != nil { - return nil, err - } + exp := snap.ExpireTime.AsTime() return &SnapshotConfig{ Snapshot: &Snapshot{c: c, name: snap.Name}, Topic: newTopic(c, snap.Topic), Expiration: exp, + Labels: snap.Labels, }, nil } diff --git a/vendor/cloud.google.com/go/pubsub/subscription.go b/vendor/cloud.google.com/go/pubsub/subscription.go index 045ddef6109c1..f325e65e3147e 100644 --- a/vendor/cloud.google.com/go/pubsub/subscription.go +++ b/vendor/cloud.google.com/go/pubsub/subscription.go @@ -25,14 +25,17 @@ import ( "cloud.google.com/go/iam" "cloud.google.com/go/internal/optional" - "github.com/golang/protobuf/ptypes" - durpb "github.com/golang/protobuf/ptypes/duration" + ipubsub "cloud.google.com/go/internal/pubsub" + "cloud.google.com/go/pubsub/internal/scheduler" gax "github.com/googleapis/gax-go/v2" "golang.org/x/sync/errgroup" pb "google.golang.org/genproto/googleapis/pubsub/v1" fmpb "google.golang.org/genproto/protobuf/field_mask" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + durpb "google.golang.org/protobuf/types/known/durationpb" + + vkit "cloud.google.com/go/pubsub/apiv1" ) // Subscription is a reference to a PubSub subscription. @@ -47,6 +50,8 @@ type Subscription struct { mu sync.Mutex receiveActive bool + + enableOrdering bool } // Subscription creates a reference to a subscription. @@ -83,7 +88,8 @@ func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator { Project: c.fullyQualifiedProjectName(), }) return &SubscriptionIterator{ - c: c, + c: c, + it: it, next: func() (string, error) { sub, err := it.Next() if err != nil { @@ -97,6 +103,7 @@ func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator { // SubscriptionIterator is an iterator that returns a series of subscriptions. type SubscriptionIterator struct { c *Client + it *vkit.SubscriptionIterator next func() (string, error) } @@ -109,6 +116,22 @@ func (subs *SubscriptionIterator) Next() (*Subscription, error) { return &Subscription{c: subs.c, name: subName}, nil } +// NextConfig returns the next subscription config. If there are no more subscriptions, +// iterator.Done will be returned. +// This call shares the underlying iterator with calls to `SubscriptionIterator.Next`. +// If you wish to use mix calls, create separate iterator instances for both. 
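Illustration (not part of the diff): a sketch tying together the snapshot helpers touched above (CreateSnapshot, the timestamppb-based SeekToTime, and SeekToSnapshot). Subscription and snapshot IDs are placeholders; it assumes a *pubsub.Client plus the time import.

func snapshotAndSeek(ctx context.Context, client *pubsub.Client) error {
	sub := client.Subscription("example-sub") // placeholder subscription ID
	// Snapshot the current backlog so it can be replayed later.
	snapCfg, err := sub.CreateSnapshot(ctx, "example-snapshot")
	if err != nil {
		return err
	}
	// Rewind delivery by an hour; only retained messages are restored.
	if err := sub.SeekToTime(ctx, time.Now().Add(-1*time.Hour)); err != nil {
		return err
	}
	// Or seek back to the snapshot taken above.
	return sub.SeekToSnapshot(ctx, snapCfg.Snapshot)
}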
+func (subs *SubscriptionIterator) NextConfig() (*SubscriptionConfig, error) { + spb, err := subs.it.Next() + if err != nil { + return nil, err + } + cfg, err := protoToSubscriptionConfig(spb, subs.c) + if err != nil { + return nil, err + } + return &cfg, nil +} + // PushConfig contains configuration for subscriptions that operate in push mode. type PushConfig struct { // A URL locating the endpoint to which messages should be pushed. @@ -188,11 +211,106 @@ func (oidcToken *OIDCToken) toProto() *pb.PushConfig_OidcToken_ { } } +// BigQueryConfigState denotes the possible states for a BigQuery Subscription. +type BigQueryConfigState int + +const ( + // BigQueryConfigStateUnspecified is the default value. This value is unused. + BigQueryConfigStateUnspecified = iota + + // BigQueryConfigActive means the subscription can actively send messages to BigQuery. + BigQueryConfigActive + + // BigQueryConfigPermissionDenied means the subscription cannot write to the BigQuery table because of permission denied errors. + BigQueryConfigPermissionDenied + + // BigQueryConfigNotFound means the subscription cannot write to the BigQuery table because it does not exist. + BigQueryConfigNotFound + + // BigQueryConfigSchemaMismatch means the subscription cannot write to the BigQuery table due to a schema mismatch. + BigQueryConfigSchemaMismatch +) + +// BigQueryConfig configures the subscription to deliver to a BigQuery table. +type BigQueryConfig struct { + // The name of the table to which to write data, of the form + // {projectId}:{datasetId}.{tableId} + Table string + + // When true, use the topic's schema as the columns to write to in BigQuery, + // if it exists. + UseTopicSchema bool + + // When true, write the subscription name, message_id, publish_time, + // attributes, and ordering_key to additional columns in the table. The + // subscription name, message_id, and publish_time fields are put in their own + // columns while all other message properties (other than data) are written to + // a JSON object in the attributes column. + WriteMetadata bool + + // When true and use_topic_schema is true, any fields that are a part of the + // topic schema that are not part of the BigQuery table schema are dropped + // when writing to BigQuery. Otherwise, the schemas must be kept in sync and + // any messages with extra fields are not written and remain in the + // subscription's backlog. + DropUnknownFields bool + + // This is an output-only field that indicates whether or not the subscription can + // receive messages. This field is set only in responses from the server; + // it is ignored if it is set in any requests. + State BigQueryConfigState +} + +func (bc *BigQueryConfig) toProto() *pb.BigQueryConfig { + if bc == nil { + return nil + } + pbCfg := &pb.BigQueryConfig{ + Table: bc.Table, + UseTopicSchema: bc.UseTopicSchema, + WriteMetadata: bc.WriteMetadata, + DropUnknownFields: bc.DropUnknownFields, + State: pb.BigQueryConfig_State(bc.State), + } + return pbCfg +} + +// SubscriptionState denotes the possible states for a Subscription. +type SubscriptionState int + +const ( + // SubscriptionStateUnspecified is the default value. This value is unused. + SubscriptionStateUnspecified = iota + + // SubscriptionStateActive means the subscription can actively send messages to BigQuery. + SubscriptionStateActive + + // SubscriptionStateResourceError means the subscription receive messages because of an + // error with the resource to which it pushes messages. 
+ // See the more detailed error state in the corresponding configuration. + SubscriptionStateResourceError +) + // SubscriptionConfig describes the configuration of a subscription. type SubscriptionConfig struct { - Topic *Topic + // The fully qualified identifier for the subscription, in the format "projects//subscriptions/" + name string + + // The topic from which this subscription is receiving messages. + Topic *Topic + + // If push delivery is used with this subscription, this field is + // used to configure it. Either `PushConfig` or `BigQueryConfig` can be set, + // but not both. If both are empty, then the subscriber will pull and ack + // messages using API methods. PushConfig PushConfig + // If delivery to BigQuery is used with this subscription, this field is + // used to configure it. Either `PushConfig` or `BigQueryConfig` can be set, + // but not both. If both are empty, then the subscriber will pull and ack + // messages using API methods. + BigQueryConfig BigQueryConfig + // The default maximum time after a subscriber receives a message before // the subscriber should acknowledge the message. Note: messages which are // obtained via Subscription.Receive need not be acknowledged within this @@ -222,13 +340,92 @@ type SubscriptionConfig struct { // The set of labels for the subscription. Labels map[string]string + // EnableMessageOrdering enables message ordering on this subscription. + // This value is only used for subscription creation and update, and + // is not read locally in calls like Subscription.Receive(). + // + // If set to false, even if messages are published with ordering keys, + // messages will not be delivered in order. + // + // When calling Subscription.Receive(), the client will check this + // value with a call to Subscription.Config(), which requires the + // roles/viewer or roles/pubsub.viewer role on your service account. + // If that call fails, mesages with ordering keys will be delivered in order. + EnableMessageOrdering bool + // DeadLetterPolicy specifies the conditions for dead lettering messages in // a subscription. If not set, dead lettering is disabled. - // - // It is EXPERIMENTAL and a part of a closed alpha that may not be - // accessible to all users. This field is subject to change or removal - // without notice. DeadLetterPolicy *DeadLetterPolicy + + // Filter is an expression written in the Cloud Pub/Sub filter language. If + // non-empty, then only `PubsubMessage`s whose `attributes` field matches the + // filter are delivered on this subscription. If empty, then no messages are + // filtered out. Cannot be changed after the subscription is created. + Filter string + + // RetryPolicy specifies how Cloud Pub/Sub retries message delivery. + RetryPolicy *RetryPolicy + + // Detached indicates whether the subscription is detached from its topic. + // Detached subscriptions don't receive messages from their topic and don't + // retain any backlog. `Pull` and `StreamingPull` requests will return + // FAILED_PRECONDITION. If the subscription is a push subscription, pushes to + // the endpoint will not be made. + Detached bool + + // TopicMessageRetentionDuration indicates the minimum duration for which a message is + // retained after it is published to the subscription's topic. If this field is + // set, messages published to the subscription's topic in the last + // `TopicMessageRetentionDuration` are always available to subscribers. + // You can enable both topic and subscription retention for the same topic. 
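Illustration (not part of the diff): a sketch of creating a subscription that uses the new BigQueryConfig together with the Filter and RetryPolicy fields documented above. The BigQuery table, IDs, and filter expression are placeholders.

func createBigQuerySub(ctx context.Context, client *pubsub.Client, topic *pubsub.Topic) (*pubsub.Subscription, error) {
	return client.CreateSubscription(ctx, "example-bq-sub", pubsub.SubscriptionConfig{
		Topic: topic,
		// Deliver messages directly to a BigQuery table instead of push/pull.
		BigQueryConfig: pubsub.BigQueryConfig{
			Table:         "example-project:example_dataset.example_table", // placeholder table
			WriteMetadata: true,
		},
		// Only deliver messages whose attributes match the filter.
		Filter: `attributes.origin = "example"`,
		// Exponential redelivery backoff between 10s and 5m.
		RetryPolicy: &pubsub.RetryPolicy{
			MinimumBackoff: 10 * time.Second,
			MaximumBackoff: 5 * time.Minute,
		},
	})
}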
+ // In this situation, the maximum of the retention durations takes effect. + // + // This is an output only field, meaning it will only appear in responses from the backend + // and will be ignored if sent in a request. + TopicMessageRetentionDuration time.Duration + + // EnableExactlyOnceDelivery configures Pub/Sub to provide the following guarantees + // for the delivery of a message with a given MessageID on this subscription: + // + // The message sent to a subscriber is guaranteed not to be resent + // before the message's acknowledgement deadline expires. + // An acknowledged message will not be resent to a subscriber. + // + // Note that subscribers may still receive multiple copies of a message + // when `enable_exactly_once_delivery` is true if the message was published + // multiple times by a publisher client. These copies are considered distinct + // by Pub/Sub and have distinct MessageID values. + // + // Lastly, to guarantee messages have been acked or nacked properly, you must + // call Message.AckWithResponse() or Message.NackWithResponse(). These return an + // AckResponse which will be ready if the message has been acked (or failed to be acked). + EnableExactlyOnceDelivery bool + + // State indicates whether or not the subscription can receive messages. + // This is an output-only field that indicates whether or not the subscription can + // receive messages. This field is set only in responses from the server; + // it is ignored if it is set in any requests. + State SubscriptionState +} + +// String returns the globally unique printable name of the subscription config. +// This method only works when the subscription config is returned from the server, +// such as when calling `client.Subscription` or `client.Subscriptions`. +// Otherwise, this will return an empty string. +func (s *SubscriptionConfig) String() string { + return s.name +} + +// ID returns the unique identifier of the subscription within its project. +// This method only works when the subscription config is returned from the server, +// such as when calling `client.Subscription` or `client.Subscriptions`. +// Otherwise, this will return an empty string. 
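Illustration (not part of the diff): the EnableExactlyOnceDelivery comment above points at the AckWithResponse pattern; a hedged sketch of what that looks like in a Receive callback, assuming the log import and a placeholder subscription that already has exactly-once delivery enabled.

func receiveExactlyOnce(ctx context.Context, client *pubsub.Client) error {
	sub := client.Subscription("example-eod-sub") // placeholder subscription ID
	return sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
		// AckWithResponse returns an AckResult that reports whether the ack
		// was actually processed by the service.
		res := m.AckWithResponse()
		if status, err := res.Get(ctx); err != nil || status != pubsub.AcknowledgeStatusSuccess {
			log.Printf("ack not confirmed: status=%v err=%v", status, err)
		}
	})
}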
+func (s *SubscriptionConfig) ID() string { + slash := strings.LastIndex(s.name, "/") + if slash == -1 { + return "" + } + return s.name[slash+1:] } func (cfg *SubscriptionConfig) toProto(name string) *pb.Subscription { @@ -236,57 +433,75 @@ func (cfg *SubscriptionConfig) toProto(name string) *pb.Subscription { if cfg.PushConfig.Endpoint != "" || len(cfg.PushConfig.Attributes) != 0 || cfg.PushConfig.AuthenticationMethod != nil { pbPushConfig = cfg.PushConfig.toProto() } + var pbBigQueryConfig *pb.BigQueryConfig + if cfg.BigQueryConfig.Table != "" { + pbBigQueryConfig = cfg.BigQueryConfig.toProto() + } var retentionDuration *durpb.Duration if cfg.RetentionDuration != 0 { - retentionDuration = ptypes.DurationProto(cfg.RetentionDuration) + retentionDuration = durpb.New(cfg.RetentionDuration) } var pbDeadLetter *pb.DeadLetterPolicy if cfg.DeadLetterPolicy != nil { pbDeadLetter = cfg.DeadLetterPolicy.toProto() } + var pbRetryPolicy *pb.RetryPolicy + if cfg.RetryPolicy != nil { + pbRetryPolicy = cfg.RetryPolicy.toProto() + } return &pb.Subscription{ - Name: name, - Topic: cfg.Topic.name, - PushConfig: pbPushConfig, - AckDeadlineSeconds: trunc32(int64(cfg.AckDeadline.Seconds())), - RetainAckedMessages: cfg.RetainAckedMessages, - MessageRetentionDuration: retentionDuration, - Labels: cfg.Labels, - ExpirationPolicy: expirationPolicyToProto(cfg.ExpirationPolicy), - DeadLetterPolicy: pbDeadLetter, + Name: name, + Topic: cfg.Topic.name, + PushConfig: pbPushConfig, + BigqueryConfig: pbBigQueryConfig, + AckDeadlineSeconds: trunc32(int64(cfg.AckDeadline.Seconds())), + RetainAckedMessages: cfg.RetainAckedMessages, + MessageRetentionDuration: retentionDuration, + Labels: cfg.Labels, + ExpirationPolicy: expirationPolicyToProto(cfg.ExpirationPolicy), + EnableMessageOrdering: cfg.EnableMessageOrdering, + DeadLetterPolicy: pbDeadLetter, + Filter: cfg.Filter, + RetryPolicy: pbRetryPolicy, + Detached: cfg.Detached, + EnableExactlyOnceDelivery: cfg.EnableExactlyOnceDelivery, } } func protoToSubscriptionConfig(pbSub *pb.Subscription, c *Client) (SubscriptionConfig, error) { rd := time.Hour * 24 * 7 - var err error if pbSub.MessageRetentionDuration != nil { - rd, err = ptypes.Duration(pbSub.MessageRetentionDuration) - if err != nil { - return SubscriptionConfig{}, err - } + rd = pbSub.MessageRetentionDuration.AsDuration() } var expirationPolicy time.Duration if ttl := pbSub.ExpirationPolicy.GetTtl(); ttl != nil { - expirationPolicy, err = ptypes.Duration(ttl) - if err != nil { - return SubscriptionConfig{}, err - } + expirationPolicy = ttl.AsDuration() } dlp := protoToDLP(pbSub.DeadLetterPolicy) + rp := protoToRetryPolicy(pbSub.RetryPolicy) subC := SubscriptionConfig{ - Topic: newTopic(c, pbSub.Topic), - AckDeadline: time.Second * time.Duration(pbSub.AckDeadlineSeconds), - RetainAckedMessages: pbSub.RetainAckedMessages, - RetentionDuration: rd, - Labels: pbSub.Labels, - ExpirationPolicy: expirationPolicy, - DeadLetterPolicy: dlp, - } - pc := protoToPushConfig(pbSub.PushConfig) - if pc != nil { + name: pbSub.Name, + Topic: newTopic(c, pbSub.Topic), + AckDeadline: time.Second * time.Duration(pbSub.AckDeadlineSeconds), + RetainAckedMessages: pbSub.RetainAckedMessages, + RetentionDuration: rd, + Labels: pbSub.Labels, + ExpirationPolicy: expirationPolicy, + EnableMessageOrdering: pbSub.EnableMessageOrdering, + DeadLetterPolicy: dlp, + Filter: pbSub.Filter, + RetryPolicy: rp, + Detached: pbSub.Detached, + TopicMessageRetentionDuration: pbSub.TopicMessageRetentionDuration.AsDuration(), + EnableExactlyOnceDelivery: 
pbSub.EnableExactlyOnceDelivery, + State: SubscriptionState(pbSub.State), + } + if pc := protoToPushConfig(pbSub.PushConfig); pc != nil { subC.PushConfig = *pc } + if bq := protoToBQConfig(pbSub.GetBigqueryConfig()); bq != nil { + subC.BigQueryConfig = *bq + } return subC, nil } @@ -309,11 +524,22 @@ func protoToPushConfig(pbPc *pb.PushConfig) *PushConfig { return pc } +func protoToBQConfig(pbBQ *pb.BigQueryConfig) *BigQueryConfig { + if pbBQ == nil { + return nil + } + bq := &BigQueryConfig{ + Table: pbBQ.GetTable(), + UseTopicSchema: pbBQ.GetUseTopicSchema(), + DropUnknownFields: pbBQ.GetDropUnknownFields(), + WriteMetadata: pbBQ.GetWriteMetadata(), + State: BigQueryConfigState(pbBQ.State), + } + return bq +} + // DeadLetterPolicy specifies the conditions for dead lettering messages in // a subscription. -// -// It is EXPERIMENTAL and a part of a closed alpha that may not be -// accessible to all users. type DeadLetterPolicy struct { DeadLetterTopic string MaxDeliveryAttempts int @@ -338,6 +564,80 @@ func protoToDLP(pbDLP *pb.DeadLetterPolicy) *DeadLetterPolicy { } } +// RetryPolicy specifies how Cloud Pub/Sub retries message delivery. +// +// Retry delay will be exponential based on provided minimum and maximum +// backoffs. https://en.wikipedia.org/wiki/Exponential_backoff. +// +// RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded +// events for a given message. +// +// Retry Policy is implemented on a best effort basis. At times, the delay +// between consecutive deliveries may not match the configuration. That is, +// delay can be more or less than configured backoff. +type RetryPolicy struct { + // MinimumBackoff is the minimum delay between consecutive deliveries of a + // given message. Value should be between 0 and 600 seconds. Defaults to 10 seconds. + MinimumBackoff optional.Duration + // MaximumBackoff is the maximum delay between consecutive deliveries of a + // given message. Value should be between 0 and 600 seconds. Defaults to 600 seconds. + MaximumBackoff optional.Duration +} + +func (rp *RetryPolicy) toProto() *pb.RetryPolicy { + if rp == nil { + return nil + } + // If RetryPolicy is the empty struct, take this as an instruction + // to remove RetryPolicy from the subscription. + if rp.MinimumBackoff == nil && rp.MaximumBackoff == nil { + return nil + } + + // Initialize minDur and maxDur to be negative, such that if the conversion from an + // optional fails, RetryPolicy won't be updated in the proto as it will remain nil. + var minDur time.Duration = -1 + var maxDur time.Duration = -1 + if rp.MinimumBackoff != nil { + minDur = optional.ToDuration(rp.MinimumBackoff) + } + if rp.MaximumBackoff != nil { + maxDur = optional.ToDuration(rp.MaximumBackoff) + } + + var minDurPB, maxDurPB *durpb.Duration + if minDur > 0 { + minDurPB = durpb.New(minDur) + } + if maxDur > 0 { + maxDurPB = durpb.New(maxDur) + } + + return &pb.RetryPolicy{ + MinimumBackoff: minDurPB, + MaximumBackoff: maxDurPB, + } +} + +func protoToRetryPolicy(rp *pb.RetryPolicy) *RetryPolicy { + if rp == nil { + return nil + } + var minBackoff, maxBackoff time.Duration + if rp.MinimumBackoff != nil { + minBackoff = rp.MinimumBackoff.AsDuration() + } + if rp.MaximumBackoff != nil { + maxBackoff = rp.MaximumBackoff.AsDuration() + } + + retryPolicy := &RetryPolicy{ + MinimumBackoff: minBackoff, + MaximumBackoff: maxBackoff, + } + return retryPolicy +} + // ReceiveSettings configure the Receive method. // A zero ReceiveSettings will result in values equivalent to DefaultReceiveSettings. 
type ReceiveSettings struct { @@ -356,10 +656,20 @@ type ReceiveSettings struct { // bounds the maximum amount of time before a message redelivery in the // event the subscriber fails to extend the deadline. // - // MaxExtensionPeriod configuration can be disabled by specifying a - // duration less than (or equal to) 0. + // MaxExtensionPeriod must be between 10s and 600s (inclusive). This configuration + // can be disabled by specifying a duration less than (or equal to) 0. MaxExtensionPeriod time.Duration + // MinExtensionPeriod is the the min duration for a single lease extension attempt. + // By default the 99th percentile of ack latency is used to determine lease extension + // periods but this value can be set to minimize the number of extraneous RPCs sent. + // + // MinExtensionPeriod must be between 10s and 600s (inclusive). This configuration + // can be disabled by specifying a duration less than (or equal to) 0. + // Defaults to off but set to 60 seconds if the subscription has exactly-once delivery enabled, + // which will be added in a future release. + MinExtensionPeriod time.Duration + // MaxOutstandingMessages is the maximum number of unprocessed messages // (unacknowledged but not yet expired). If MaxOutstandingMessages is 0, it // will be treated as if it were DefaultReceiveSettings.MaxOutstandingMessages. @@ -374,9 +684,17 @@ type ReceiveSettings struct { // for unprocessed messages. MaxOutstandingBytes int - // NumGoroutines is the number of goroutines Receive will spawn to pull - // messages concurrently. If NumGoroutines is less than 1, it will be treated - // as if it were DefaultReceiveSettings.NumGoroutines. + // UseLegacyFlowControl disables enforcing flow control settings at the Cloud + // PubSub server and the less accurate method of only enforcing flow control + // at the client side is used. + // The default is false. + UseLegacyFlowControl bool + + // NumGoroutines is the number of goroutines that each datastructure along + // the Receive path will spawn. Adjusting this value adjusts concurrency + // along the receive path. + // + // NumGoroutines defaults to DefaultReceiveSettings.NumGoroutines. // // NumGoroutines does not limit the number of messages that can be processed // concurrently. Even with one goroutine, many messages might be processed at @@ -385,12 +703,21 @@ type ReceiveSettings struct { // processed concurrently, set MaxOutstandingMessages. NumGoroutines int - // If Synchronous is true, then no more than MaxOutstandingMessages will be in - // memory at one time. (In contrast, when Synchronous is false, more than - // MaxOutstandingMessages may have been received from the service and in memory - // before being processed.) MaxOutstandingBytes still refers to the total bytes - // processed, rather than in memory. NumGoroutines is ignored. + // Synchronous switches the underlying receiving mechanism to unary Pull. + // When Synchronous is false, the more performant StreamingPull is used. + // StreamingPull also has the benefit of subscriber affinity when using + // ordered delivery. + // When Synchronous is true, NumGoroutines is set to 1 and only one Pull + // RPC will be made to poll messages at a time. // The default is false. + // + // Deprecated. + // Previously, users might use Synchronous mode since StreamingPull had a limitation + // where MaxOutstandingMessages was not always respected with large batches of + // small messages. 
With server side flow control, this is no longer an issue + // and we recommend switching to the default StreamingPull mode by setting + // Synchronous to false. + // Synchronous mode does not work with exactly once delivery. Synchronous bool } @@ -413,16 +740,14 @@ type ReceiveSettings struct { // idea of a duration that is short, but not so short that we perform excessive RPCs. const synchronousWaitTime = 100 * time.Millisecond -// This is a var so that tests can change it. -var minAckDeadline = 10 * time.Second - // DefaultReceiveSettings holds the default values for ReceiveSettings. var DefaultReceiveSettings = ReceiveSettings{ MaxExtension: 60 * time.Minute, MaxExtensionPeriod: 0, + MinExtensionPeriod: 0, MaxOutstandingMessages: 1000, MaxOutstandingBytes: 1e9, // 1G - NumGoroutines: 1, + NumGoroutines: 10, } // Delete deletes the subscription. @@ -457,9 +782,14 @@ func (s *Subscription) Config(ctx context.Context) (SubscriptionConfig, error) { // SubscriptionConfigToUpdate describes how to update a subscription. type SubscriptionConfigToUpdate struct { - // If non-nil, the push config is changed. + // If non-nil, the push config is changed. Cannot be set at the same time as BigQueryConfig. + // If currently in push mode, set this value to the zero value to revert to a Pull based subscription. PushConfig *PushConfig + // If non-nil, the bigquery config is changed. Cannot be set at the same time as PushConfig. + // If currently in bigquery mode, set this value to the zero value to revert to a Pull based subscription, + BigQueryConfig *BigQueryConfig + // If non-zero, the ack deadline is changed. AckDeadline time.Duration @@ -474,9 +804,6 @@ type SubscriptionConfigToUpdate struct { // If non-nil, DeadLetterPolicy is changed. To remove dead lettering from // a subscription, use the zero value for this struct. - // - // It is EXPERIMENTAL and a part of a closed alpha that may not be - // accessible to all users. DeadLetterPolicy *DeadLetterPolicy // If non-nil, the current set of labels is completely @@ -484,6 +811,14 @@ type SubscriptionConfigToUpdate struct { // This field has beta status. It is not subject to the stability guarantee // and may change. Labels map[string]string + + // If non-nil, RetryPolicy is changed. To remove an existing retry policy + // (to redeliver messages as soon as possible) use a pointer to the zero value + // for this struct. + RetryPolicy *RetryPolicy + + // If set, EnableExactlyOnce is changed. + EnableExactlyOnceDelivery optional.Bool } // Update changes an existing subscription according to the fields set in cfg. 
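Illustration (not part of the diff): a minimal sketch of the SubscriptionConfigToUpdate fields added above, flipping on exactly-once delivery and adjusting the retry policy in one Update call. IDs and backoff values are placeholders.

func enableExactlyOnce(ctx context.Context, client *pubsub.Client) error {
	sub := client.Subscription("example-sub") // placeholder subscription ID
	_, err := sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
		// optional.Bool, so leaving it nil would keep the current value.
		EnableExactlyOnceDelivery: true,
		RetryPolicy: &pubsub.RetryPolicy{
			MinimumBackoff: 5 * time.Second,
			MaximumBackoff: 60 * time.Second,
		},
	})
	return err
}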
@@ -493,7 +828,7 @@ type SubscriptionConfigToUpdate struct { func (s *Subscription) Update(ctx context.Context, cfg SubscriptionConfigToUpdate) (SubscriptionConfig, error) { req := s.updateRequest(&cfg) if err := cfg.validate(); err != nil { - return SubscriptionConfig{}, fmt.Errorf("pubsub: UpdateSubscription %v", err) + return SubscriptionConfig{}, fmt.Errorf("pubsub: UpdateSubscription %w", err) } if len(req.UpdateMask.Paths) == 0 { return SubscriptionConfig{}, errors.New("pubsub: UpdateSubscription call with nothing to update") @@ -512,6 +847,10 @@ func (s *Subscription) updateRequest(cfg *SubscriptionConfigToUpdate) *pb.Update psub.PushConfig = cfg.PushConfig.toProto() paths = append(paths, "push_config") } + if cfg.BigQueryConfig != nil { + psub.BigqueryConfig = cfg.BigQueryConfig.toProto() + paths = append(paths, "bigquery_config") + } if cfg.AckDeadline != 0 { psub.AckDeadlineSeconds = trunc32(int64(cfg.AckDeadline.Seconds())) paths = append(paths, "ack_deadline_seconds") @@ -521,7 +860,7 @@ func (s *Subscription) updateRequest(cfg *SubscriptionConfigToUpdate) *pb.Update paths = append(paths, "retain_acked_messages") } if cfg.RetentionDuration != 0 { - psub.MessageRetentionDuration = ptypes.DurationProto(cfg.RetentionDuration) + psub.MessageRetentionDuration = durpb.New(cfg.RetentionDuration) paths = append(paths, "message_retention_duration") } if cfg.ExpirationPolicy != nil { @@ -536,6 +875,14 @@ func (s *Subscription) updateRequest(cfg *SubscriptionConfigToUpdate) *pb.Update psub.Labels = cfg.Labels paths = append(paths, "labels") } + if cfg.RetryPolicy != nil { + psub.RetryPolicy = cfg.RetryPolicy.toProto() + paths = append(paths, "retry_policy") + } + if cfg.EnableExactlyOnceDelivery != nil { + psub.EnableExactlyOnceDelivery = optional.ToBool(cfg.EnableExactlyOnceDelivery) + paths = append(paths, "enable_exactly_once_delivery") + } return &pb.UpdateSubscriptionRequest{ Subscription: psub, UpdateMask: &fmpb.FieldMask{Paths: paths}, @@ -556,11 +903,11 @@ func (cfg *SubscriptionConfigToUpdate) validate() error { if cfg == nil || cfg.ExpirationPolicy == nil { return nil } - policy, min := optional.ToDuration(cfg.ExpirationPolicy), minExpirationPolicy - if policy == 0 || policy >= min { - return nil + expPolicy, min := optional.ToDuration(cfg.ExpirationPolicy), minExpirationPolicy + if expPolicy != 0 && expPolicy < min { + return fmt.Errorf("invalid expiration policy(%q) < minimum(%q)", expPolicy, min) } - return fmt.Errorf("invalid expiration policy(%q) < minimum(%q)", policy, min) + return nil } func expirationPolicyToProto(expirationPolicy optional.Duration) *pb.ExpirationPolicy { @@ -574,7 +921,7 @@ func expirationPolicyToProto(expirationPolicy optional.Duration) *pb.ExpirationP // https://godoc.org/google.golang.org/genproto/googleapis/pubsub/v1#ExpirationPolicy.Ttl // if ExpirationPolicy.Ttl is set to nil, the expirationPolicy is toggled to NEVER expire. if dur != 0 { - ttl = ptypes.DurationProto(dur) + ttl = durpb.New(dur) } return &pb.ExpirationPolicy{ Ttl: ttl, @@ -634,9 +981,9 @@ var errReceiveInProgress = errors.New("pubsub: Receive already in progress for t // // The standard way to terminate a Receive is to cancel its context: // -// cctx, cancel := context.WithCancel(ctx) -// err := sub.Receive(cctx, callback) -// // Call cancel from callback, or another goroutine. +// cctx, cancel := context.WithCancel(ctx) +// err := sub.Receive(cctx, callback) +// // Call cancel from callback, or another goroutine. 
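Illustration (not part of the diff): a rough usage sketch combining the cancellation pattern from the Receive doc comment above with the new ReceiveSettings knobs (MaxOutstandingMessages, MinExtensionPeriod). IDs and limits are placeholders; stopping after one message is purely for illustration.

func receiveSome(ctx context.Context, client *pubsub.Client) error {
	sub := client.Subscription("example-sub") // placeholder subscription ID
	sub.ReceiveSettings.MaxOutstandingMessages = 100
	sub.ReceiveSettings.MinExtensionPeriod = 30 * time.Second

	cctx, cancel := context.WithCancel(ctx)
	defer cancel()
	return sub.Receive(cctx, func(ctx context.Context, m *pubsub.Message) {
		log.Printf("got message %s", m.ID)
		m.Ack()
		cancel() // stop receiving after the first message
	})
}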
// // If the service returns a non-retryable error, Receive returns that error after // all of the outstanding calls to f have returned. If ctx is done, Receive @@ -666,6 +1013,9 @@ func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Mes s.mu.Unlock() defer func() { s.mu.Lock(); s.receiveActive = false; s.mu.Unlock() }() + s.checkOrdering(ctx) + + // TODO(hongalex): move settings check to a helper function to make it more testable maxCount := s.ReceiveSettings.MaxOutstandingMessages if maxCount == 0 { maxCount = DefaultReceiveSettings.MaxOutstandingMessages @@ -681,6 +1031,15 @@ func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Mes // If MaxExtension is negative, disable automatic extension. maxExt = 0 } + maxExtPeriod := s.ReceiveSettings.MaxExtensionPeriod + if maxExtPeriod < 0 { + maxExtPeriod = DefaultReceiveSettings.MaxExtensionPeriod + } + minExtPeriod := s.ReceiveSettings.MinExtensionPeriod + if minExtPeriod < 0 { + minExtPeriod = DefaultReceiveSettings.MinExtensionPeriod + } + var numGoroutines int switch { case s.ReceiveSettings.Synchronous: @@ -692,110 +1051,194 @@ func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Mes } // TODO(jba): add tests that verify that ReceiveSettings are correctly processed. po := &pullOptions{ - maxExtension: maxExt, - maxPrefetch: trunc32(int64(maxCount)), - synchronous: s.ReceiveSettings.Synchronous, + maxExtension: maxExt, + maxExtensionPeriod: maxExtPeriod, + minExtensionPeriod: minExtPeriod, + maxPrefetch: trunc32(int64(maxCount)), + synchronous: s.ReceiveSettings.Synchronous, + maxOutstandingMessages: maxCount, + maxOutstandingBytes: maxBytes, + useLegacyFlowControl: s.ReceiveSettings.UseLegacyFlowControl, } - fc := newFlowController(maxCount, maxBytes) + fc := newSubscriptionFlowController(FlowControlSettings{ + MaxOutstandingMessages: maxCount, + MaxOutstandingBytes: maxBytes, + LimitExceededBehavior: FlowControlBlock, + }) + + sched := scheduler.NewReceiveScheduler(maxCount) // Wait for all goroutines started by Receive to return, so instead of an // obscure goroutine leak we have an obvious blocked call to Receive. group, gctx := errgroup.WithContext(ctx) - for i := 0; i < numGoroutines; i++ { - group.Go(func() error { - return s.receive(gctx, po, fc, f) - }) + + type closeablePair struct { + wg *sync.WaitGroup + iter *messageIterator } - return group.Wait() -} -func (s *Subscription) receive(ctx context.Context, po *pullOptions, fc *flowController, f func(context.Context, *Message)) error { - // Cancel a sub-context when we return, to kick the context-aware callbacks - // and the goroutine below. - ctx2, cancel := context.WithCancel(ctx) - // The iterator does not use the context passed to Receive. If it did, canceling - // that context would immediately stop the iterator without waiting for unacked - // messages. - iter := newMessageIterator(s.c.subc, s.name, &s.ReceiveSettings.MaxExtensionPeriod, po) - - // We cannot use errgroup from Receive here. Receive might already be calling group.Wait, - // and group.Wait cannot be called concurrently with group.Go. We give each receive() its - // own WaitGroup instead. - // Since wg.Add is only called from the main goroutine, wg.Wait is guaranteed - // to be called after all Adds. - var wg sync.WaitGroup - wg.Add(1) - go func() { - <-ctx2.Done() - // Call stop when Receive's context is done. - // Stop will block until all outstanding messages have been acknowledged - // or there was a fatal service error. 
- iter.stop() - wg.Done() - }() - defer wg.Wait() - - defer cancel() - for { - var maxToPull int32 // maximum number of messages to pull - if po.synchronous { - if po.maxPrefetch < 0 { - // If there is no limit on the number of messages to pull, use a reasonable default. - maxToPull = 1000 - } else { - // Limit the number of messages in memory to MaxOutstandingMessages - // (here, po.maxPrefetch). For each message currently in memory, we have - // called fc.acquire but not fc.release: this is fc.count(). The next - // call to Pull should fetch no more than the difference between these - // values. - maxToPull = po.maxPrefetch - int32(fc.count()) - if maxToPull <= 0 { - // Wait for some callbacks to finish. - if err := gax.Sleep(ctx, synchronousWaitTime); err != nil { + var pairs []closeablePair + + // Cancel a sub-context which, when we finish a single receiver, will kick + // off the context-aware callbacks and the goroutine below (which stops + // all receivers, iterators, and the scheduler). + ctx2, cancel2 := context.WithCancel(gctx) + defer cancel2() + + for i := 0; i < numGoroutines; i++ { + // The iterator does not use the context passed to Receive. If it did, + // canceling that context would immediately stop the iterator without + // waiting for unacked messages. + iter := newMessageIterator(s.c.subc, s.name, po) + + // We cannot use errgroup from Receive here. Receive might already be + // calling group.Wait, and group.Wait cannot be called concurrently with + // group.Go. We give each receive() its own WaitGroup instead. + // + // Since wg.Add is only called from the main goroutine, wg.Wait is + // guaranteed to be called after all Adds. + var wg sync.WaitGroup + wg.Add(1) + pairs = append(pairs, closeablePair{wg: &wg, iter: iter}) + + group.Go(func() error { + defer wg.Wait() + defer cancel2() + for { + var maxToPull int32 // maximum number of messages to pull + if po.synchronous { + if po.maxPrefetch < 0 { + // If there is no limit on the number of messages to + // pull, use a reasonable default. + maxToPull = 1000 + } else { + // Limit the number of messages in memory to MaxOutstandingMessages + // (here, po.maxPrefetch). For each message currently in memory, we have + // called fc.acquire but not fc.release: this is fc.count(). The next + // call to Pull should fetch no more than the difference between these + // values. + maxToPull = po.maxPrefetch - int32(fc.count()) + if maxToPull <= 0 { + // Wait for some callbacks to finish. + if err := gax.Sleep(ctx, synchronousWaitTime); err != nil { + // Return nil if the context is done, not err. + return nil + } + continue + } + } + } + // If the context is done, don't pull more messages. + select { + case <-ctx.Done(): + return nil + default: + } + msgs, err := iter.receive(maxToPull) + if err == io.EOF { + return nil + } + if err != nil { + return err + } + // If context is done and messages have been pulled, + // nack them. + select { + case <-ctx.Done(): + for _, m := range msgs { + m.Nack() + } + return nil + default: + } + for i, msg := range msgs { + msg := msg + // TODO(jba): call acquire closer to when the message is allocated. + if err := fc.acquire(ctx, len(msg.Data)); err != nil { + // TODO(jba): test that these "orphaned" messages are nacked immediately when ctx is done. + for _, m := range msgs[i:] { + m.Nack() + } // Return nil if the context is done, not err. 
return nil } - continue - } - } - } - msgs, err := iter.receive(maxToPull) - if err == io.EOF { - return nil - } - if err != nil { - return err - } - for i, msg := range msgs { - msg := msg - // TODO(jba): call acquire closer to when the message is allocated. - if err := fc.acquire(ctx, len(msg.Data)); err != nil { - // TODO(jba): test that these "orphaned" messages are nacked immediately when ctx is done. - for _, m := range msgs[i:] { - m.Nack() + iter.eoMu.RLock() + ackh, _ := msgAckHandler(msg, iter.enableExactlyOnceDelivery) + iter.eoMu.RUnlock() + old := ackh.doneFunc + msgLen := len(msg.Data) + ackh.doneFunc = func(ackID string, ack bool, r *ipubsub.AckResult, receiveTime time.Time) { + defer fc.release(ctx, msgLen) + old(ackID, ack, r, receiveTime) + } + wg.Add(1) + // Make sure the subscription has ordering enabled before adding to scheduler. + var key string + if s.enableOrdering { + key = msg.OrderingKey + } + // TODO(deklerk): Can we have a generic handler at the + // constructor level? + if err := sched.Add(key, msg, func(msg interface{}) { + defer wg.Done() + f(ctx2, msg.(*Message)) + }); err != nil { + wg.Done() + // If there are any errors with scheduling messages, + // nack them so they can be redelivered. + msg.Nack() + // Currently, only this error is returned by the receive scheduler. + if errors.Is(err, scheduler.ErrReceiveDraining) { + return nil + } + return err + } } - // Return nil if the context is done, not err. - return nil - } - old := msg.doneFunc - msgLen := len(msg.Data) - msg.doneFunc = func(ackID string, ack bool, receiveTime time.Time) { - defer fc.release(msgLen) - old(ackID, ack, receiveTime) } - wg.Add(1) - go func() { - defer wg.Done() - f(ctx2, msg) - }() + }) + } + + go func() { + <-ctx2.Done() + + // Wait for all iterators to stop. + for _, p := range pairs { + p.iter.stop() + p.wg.Done() } + + // This _must_ happen after every iterator has stopped, or some + // iterator will still have undelivered messages but the scheduler will + // already be shut down. + sched.Shutdown() + }() + + return group.Wait() +} + +// checkOrdering calls Config to check theEnableMessageOrdering field. +// If this call fails (e.g. because the service account doesn't have +// the roles/viewer or roles/pubsub.viewer role) we will assume +// EnableMessageOrdering to be true. +// See: https://github.com/googleapis/google-cloud-go/issues/3884 +func (s *Subscription) checkOrdering(ctx context.Context) { + cfg, err := s.Config(ctx) + if err != nil { + s.enableOrdering = true + } else { + s.enableOrdering = cfg.EnableMessageOrdering } } type pullOptions struct { - maxExtension time.Duration - maxPrefetch int32 + maxExtension time.Duration // the maximum time to extend a message's ack deadline in total + maxExtensionPeriod time.Duration // the maximum time to extend a message's ack deadline per modack rpc + minExtensionPeriod time.Duration // the minimum time to extend a message's lease duration per modack + maxPrefetch int32 // the max number of outstanding messages, used to calculate maxToPull // If true, use unary Pull instead of StreamingPull, and never pull more // than maxPrefetch messages. 
- synchronous bool + synchronous bool + maxOutstandingMessages int + maxOutstandingBytes int + useLegacyFlowControl bool } diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go index cf8fc5d3460ed..c96b9ce9bce81 100644 --- a/vendor/cloud.google.com/go/pubsub/topic.go +++ b/vendor/cloud.google.com/go/pubsub/topic.go @@ -25,7 +25,10 @@ import ( "time" "cloud.google.com/go/iam" - "github.com/golang/protobuf/proto" + "cloud.google.com/go/internal/optional" + ipubsub "cloud.google.com/go/internal/pubsub" + vkit "cloud.google.com/go/pubsub/apiv1" + "cloud.google.com/go/pubsub/internal/scheduler" gax "github.com/googleapis/gax-go/v2" "go.opencensus.io/stats" "go.opencensus.io/tag" @@ -35,6 +38,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" ) const ( @@ -47,6 +52,13 @@ const ( MaxPublishRequestBytes = 1e7 ) +const ( + // TODO: math.MaxInt was added in Go 1.17. We should use that once 1.17 + // becomes the minimum supported version of Go. + intSize = 32 << (^uint(0) >> 63) + maxInt = 1<<(intSize-1) - 1 +) + // ErrOversizedMessage indicates that a message's size exceeds MaxPublishRequestBytes. var ErrOversizedMessage = bundler.ErrOversizedItem @@ -62,9 +74,14 @@ type Topic struct { // first call to Publish. The default is DefaultPublishSettings. PublishSettings PublishSettings - mu sync.RWMutex - stopped bool - bundler *bundler.Bundler + mu sync.RWMutex + stopped bool + scheduler *scheduler.PublishScheduler + + flowController + + // EnableMessageOrdering enables delivery of ordered keys. + EnableMessageOrdering bool } // PublishSettings control the bundling of published messages. @@ -80,7 +97,9 @@ type PublishSettings struct { // Publish a batch when its size in bytes reaches this value. ByteThreshold int - // The number of goroutines that invoke the Publish RPC concurrently. + // The number of goroutines used in each of the data structures that are + // involved along the the Publish path. Adjusting this value adjusts + // concurrency along the publish path. // // Defaults to a multiple of GOMAXPROCS. NumGoroutines int @@ -89,10 +108,15 @@ type PublishSettings struct { Timeout time.Duration // The maximum number of bytes that the Bundler will keep in memory before - // returning ErrOverflow. + // returning ErrOverflow. This is now superseded by FlowControlSettings.MaxOutstandingBytes. + // If MaxOutstandingBytes is set, that value will override BufferedByteLimit. // // Defaults to DefaultPublishSettings.BufferedByteLimit. + // Deprecated: Set `Topic.PublishSettings.FlowControlSettings.MaxOutstandingBytes` instead. BufferedByteLimit int + + // FlowControlSettings defines publisher flow control settings. + FlowControlSettings FlowControlSettings } // DefaultPublishSettings holds the default values for topics' PublishSettings. @@ -105,6 +129,11 @@ var DefaultPublishSettings = PublishSettings{ // chosen as a reasonable amount of messages in the worst case whilst still // capping the number to a low enough value to not OOM users. BufferedByteLimit: 10 * MaxPublishRequestBytes, + FlowControlSettings: FlowControlSettings{ + MaxOutstandingMessages: 1000, + MaxOutstandingBytes: -1, + LimitExceededBehavior: FlowControlIgnore, + }, } // CreateTopic creates a new topic. 
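Illustration (not part of the diff): with the Topic now embedding a flowController (above), publisher-side flow control is configured through PublishSettings rather than BufferedByteLimit. A hedged sketch with placeholder limits:

func configurePublisher(client *pubsub.Client) *pubsub.Topic {
	t := client.Topic("example-topic") // placeholder topic ID
	t.PublishSettings.FlowControlSettings = pubsub.FlowControlSettings{
		MaxOutstandingMessages: 500,
		MaxOutstandingBytes:    50 * 1024 * 1024,
		// Block Publish calls instead of returning an error when limits are hit.
		LimitExceededBehavior: pubsub.FlowControlBlock,
	}
	return t
}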
@@ -136,12 +165,9 @@ func (c *Client) CreateTopic(ctx context.Context, topicID string) (*Topic, error // If the topic already exists, an error will be returned. func (c *Client) CreateTopicWithConfig(ctx context.Context, topicID string, tc *TopicConfig) (*Topic, error) { t := c.Topic(topicID) - _, err := c.pubc.CreateTopic(ctx, &pb.Topic{ - Name: t.name, - Labels: tc.Labels, - MessageStoragePolicy: messageStoragePolicyToProto(&tc.MessageStoragePolicy), - KmsKeyName: tc.KMSKeyName, - }) + topic := tc.toProto() + topic.Name = t.name + _, err := c.pubc.CreateTopic(ctx, topic) if err != nil { return nil, err } @@ -178,6 +204,9 @@ func newTopic(c *Client, name string) *Topic { // TopicConfig describes the configuration of a topic. type TopicConfig struct { + // The fully qualified identifier for the topic, in the format "projects//topics/" + name string + // The set of labels for the topic. Labels map[string]string @@ -188,6 +217,57 @@ type TopicConfig struct { // published to this topic, in the format // "projects/P/locations/L/keyRings/R/cryptoKeys/K". KMSKeyName string + + // Schema defines the schema settings upon topic creation. This cannot + // be modified after a topic has been created. + SchemaSettings *SchemaSettings + + // RetentionDuration configures the minimum duration to retain a message + // after it is published to the topic. If this field is set, messages published + // to the topic in the last `RetentionDuration` are always available to subscribers. + // For instance, it allows any attached subscription to [seek to a + // timestamp](https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) + // that is up to `RetentionDuration` in the past. If this field is + // not set, message retention is controlled by settings on individual + // subscriptions. Cannot be more than 7 days or less than 10 minutes. + // + // For more information, see https://cloud.google.com/pubsub/docs/replay-overview#topic_message_retention. + RetentionDuration optional.Duration +} + +// String returns the printable globally unique name for the topic config. +// This method only works when the topic config is returned from the server, +// such as when calling `client.Topic` or `client.Topics`. +// Otherwise, this will return an empty string. +func (t *TopicConfig) String() string { + return t.name +} + +// ID returns the unique identifier of the topic within its project. +// This method only works when the topic config is returned from the server, +// such as when calling `client.Topic` or `client.Topics`. +// Otherwise, this will return an empty string. +func (t *TopicConfig) ID() string { + slash := strings.LastIndex(t.name, "/") + if slash == -1 { + return "" + } + return t.name[slash+1:] +} + +func (tc *TopicConfig) toProto() *pb.Topic { + var retDur *durationpb.Duration + if tc.RetentionDuration != nil { + retDur = durationpb.New(optional.ToDuration(tc.RetentionDuration)) + } + pbt := &pb.Topic{ + Labels: tc.Labels, + MessageStoragePolicy: messageStoragePolicyToProto(&tc.MessageStoragePolicy), + KmsKeyName: tc.KMSKeyName, + SchemaSettings: schemaSettingsToProto(tc.SchemaSettings), + MessageRetentionDuration: retDur, + } + return pbt } // TopicConfigToUpdate describes how to update a topic. @@ -207,14 +287,43 @@ type TopicConfigToUpdate struct { // This field has beta status. It is not subject to the stability guarantee // and may change. MessageStoragePolicy *MessageStoragePolicy + + // If set to a positive duration between 10 minutes and 7 days, RetentionDuration is changed. 
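Illustration (not part of the diff): a sketch of CreateTopicWithConfig using the new TopicConfig fields introduced above, SchemaSettings and RetentionDuration. The schema resource path and IDs are placeholders.

func createTopicWithRetention(ctx context.Context, client *pubsub.Client) (*pubsub.Topic, error) {
	return client.CreateTopicWithConfig(ctx, "example-topic", &pubsub.TopicConfig{
		// Retain published messages on the topic for a day.
		RetentionDuration: 24 * time.Hour,
		// Bind the topic to an existing schema; cannot be changed after creation.
		SchemaSettings: &pubsub.SchemaSettings{
			Schema:   "projects/example-project/schemas/example-schema", // placeholder path
			Encoding: pubsub.EncodingJSON,
		},
	})
}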
+ // If set to a negative value, this clears RetentionDuration from the topic. + // If nil, the retention duration remains unchanged. + RetentionDuration optional.Duration } func protoToTopicConfig(pbt *pb.Topic) TopicConfig { - return TopicConfig{ + tc := TopicConfig{ + name: pbt.Name, Labels: pbt.Labels, MessageStoragePolicy: protoToMessageStoragePolicy(pbt.MessageStoragePolicy), KMSKeyName: pbt.KmsKeyName, + SchemaSettings: protoToSchemaSettings(pbt.SchemaSettings), + } + if pbt.GetMessageRetentionDuration() != nil { + tc.RetentionDuration = pbt.GetMessageRetentionDuration().AsDuration() } + return tc +} + +// DetachSubscriptionResult is the response for the DetachSubscription method. +// Reserved for future use. +type DetachSubscriptionResult struct{} + +// DetachSubscription detaches a subscription from its topic. All messages +// retained in the subscription are dropped. Subsequent `Pull` and `StreamingPull` +// requests will return FAILED_PRECONDITION. If the subscription is a push +// subscription, pushes to the endpoint will stop. +func (c *Client) DetachSubscription(ctx context.Context, sub string) (*DetachSubscriptionResult, error) { + _, err := c.pubc.DetachSubscription(ctx, &pb.DetachSubscriptionRequest{ + Subscription: sub, + }) + if err != nil { + return nil, err + } + return &DetachSubscriptionResult{}, nil } // MessageStoragePolicy constrains how messages published to the topic may be stored. It @@ -285,6 +394,15 @@ func (t *Topic) updateRequest(cfg TopicConfigToUpdate) *pb.UpdateTopicRequest { pt.MessageStoragePolicy = messageStoragePolicyToProto(cfg.MessageStoragePolicy) paths = append(paths, "message_storage_policy") } + if cfg.RetentionDuration != nil { + r := optional.ToDuration(cfg.RetentionDuration) + pt.MessageRetentionDuration = durationpb.New(r) + if r < 0 { + // Clear MessageRetentionDuration if sentinel value is read. + pt.MessageRetentionDuration = nil + } + paths = append(paths, "message_retention_duration") + } return &pb.UpdateTopicRequest{ Topic: pt, UpdateMask: &fmpb.FieldMask{Paths: paths}, @@ -295,7 +413,8 @@ func (t *Topic) updateRequest(cfg TopicConfigToUpdate) *pb.UpdateTopicRequest { func (c *Client) Topics(ctx context.Context) *TopicIterator { it := c.pubc.ListTopics(ctx, &pb.ListTopicsRequest{Project: c.fullyQualifiedProjectName()}) return &TopicIterator{ - c: c, + c: c, + it: it, next: func() (string, error) { topic, err := it.Next() if err != nil { @@ -309,6 +428,7 @@ func (c *Client) Topics(ctx context.Context) *TopicIterator { // TopicIterator is an iterator that returns a series of topics. type TopicIterator struct { c *Client + it *vkit.TopicIterator next func() (string, error) } @@ -321,6 +441,19 @@ func (tps *TopicIterator) Next() (*Topic, error) { return newTopic(tps.c, topicName), nil } +// NextConfig returns the next topic config. If there are no more topics, +// iterator.Done will be returned. +// This call shares the underlying iterator with calls to `TopicIterator.Next`. +// If you wish to use mix calls, create separate iterator instances for both. +func (t *TopicIterator) NextConfig() (*TopicConfig, error) { + tpb, err := t.it.Next() + if err != nil { + return nil, err + } + cfg := protoToTopicConfig(tpb) + return &cfg, nil +} + // ID returns the unique identifier of the topic within its project. 
func (t *Topic) ID() string { slash := strings.LastIndex(t.name, "/") @@ -376,6 +509,17 @@ func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator { var errTopicStopped = errors.New("pubsub: Stop has been called for this topic") +// A PublishResult holds the result from a call to Publish. +// +// Call Get to obtain the result of the Publish call. Example: +// +// // Get blocks until Publish completes or ctx is done. +// id, err := r.Get(ctx) +// if err != nil { +// // TODO: Handle error. +// } +type PublishResult = ipubsub.PublishResult + // Publish publishes msg to the topic asynchronously. Messages are batched and // sent according to the topic's PublishSettings. Publish never blocks. // @@ -386,34 +530,44 @@ var errTopicStopped = errors.New("pubsub: Stop has been called for this topic") // need to be stopped by calling t.Stop(). Once stopped, future calls to Publish // will immediately return a PublishResult with an error. func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult { - // Use a PublishRequest with only the Messages field to calculate the size - // of an individual message. This accurately calculates the size of the - // encoded proto message by accounting for the length of an individual - // PubSubMessage and Data/Attributes field. - // TODO(hongalex): if this turns out to take significant time, try to approximate it. - msg.size = proto.Size(&pb.PublishRequest{ - Messages: []*pb.PubsubMessage{ - { - Data: msg.Data, - Attributes: msg.Attributes, - }, - }, + ctx, err := tag.New(ctx, tag.Insert(keyStatus, "OK"), tag.Upsert(keyTopic, t.name)) + if err != nil { + log.Printf("pubsub: cannot create context with tag in Publish: %v", err) + } + + r := ipubsub.NewPublishResult() + if !t.EnableMessageOrdering && msg.OrderingKey != "" { + ipubsub.SetPublishResult(r, "", errors.New("Topic.EnableMessageOrdering=false, but an OrderingKey was set in Message. Please remove the OrderingKey or turn on Topic.EnableMessageOrdering")) + return r + } + + // Calculate the size of the encoded proto message by accounting + // for the length of an individual PubSubMessage and Data/Attributes field. + msgSize := proto.Size(&pb.PubsubMessage{ + Data: msg.Data, + Attributes: msg.Attributes, + OrderingKey: msg.OrderingKey, }) - r := &PublishResult{ready: make(chan struct{})} + t.initBundler() t.mu.RLock() defer t.mu.RUnlock() // TODO(aboulhosn) [from bcmills] consider changing the semantics of bundler to perform this logic so we don't have to do it here if t.stopped { - r.set("", errTopicStopped) + ipubsub.SetPublishResult(r, "", errTopicStopped) return r } - // TODO(jba) [from bcmills] consider using a shared channel per bundle - // (requires Bundler API changes; would reduce allocations) - err := t.bundler.Add(&bundledMessage{msg, r}, msg.size) + if err := t.flowController.acquire(ctx, msgSize); err != nil { + t.scheduler.Pause(msg.OrderingKey) + ipubsub.SetPublishResult(r, "", err) + return r + } + err = t.scheduler.Add(msg.OrderingKey, &bundledMessage{msg, r, msgSize}, msgSize) if err != nil { - r.set("", err) + fmt.Printf("got err: %v\n", err) + t.scheduler.Pause(msg.OrderingKey) + ipubsub.SetPublishResult(r, "", err) } return r } @@ -423,57 +577,32 @@ func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult { // failed to be sent. 
func (t *Topic) Stop() { t.mu.Lock() - noop := t.stopped || t.bundler == nil + noop := t.stopped || t.scheduler == nil t.stopped = true t.mu.Unlock() if noop { return } - t.bundler.Flush() -} - -// A PublishResult holds the result from a call to Publish. -type PublishResult struct { - ready chan struct{} - serverID string - err error + t.scheduler.FlushAndStop() } -// Ready returns a channel that is closed when the result is ready. -// When the Ready channel is closed, Get is guaranteed not to block. -func (r *PublishResult) Ready() <-chan struct{} { return r.ready } - -// Get returns the server-generated message ID and/or error result of a Publish call. -// Get blocks until the Publish call completes or the context is done. -func (r *PublishResult) Get(ctx context.Context) (serverID string, err error) { - // If the result is already ready, return it even if the context is done. - select { - case <-r.Ready(): - return r.serverID, r.err - default: - } - select { - case <-ctx.Done(): - return "", ctx.Err() - case <-r.Ready(): - return r.serverID, r.err +// Flush blocks until all remaining messages are sent. +func (t *Topic) Flush() { + if t.stopped || t.scheduler == nil { + return } -} - -func (r *PublishResult) set(sid string, err error) { - r.serverID = sid - r.err = err - close(r.ready) + t.scheduler.Flush() } type bundledMessage struct { - msg *Message - res *PublishResult + msg *Message + res *PublishResult + size int } func (t *Topic) initBundler() { t.mu.RLock() - noop := t.stopped || t.bundler != nil + noop := t.stopped || t.scheduler != nil t.mu.RUnlock() if noop { return @@ -481,12 +610,21 @@ func (t *Topic) initBundler() { t.mu.Lock() defer t.mu.Unlock() // Must re-check, since we released the lock. - if t.stopped || t.bundler != nil { + if t.stopped || t.scheduler != nil { return } timeout := t.PublishSettings.Timeout - t.bundler = bundler.NewBundler(&bundledMessage{}, func(items interface{}) { + + workers := t.PublishSettings.NumGoroutines + // Unless overridden, allow many goroutines per CPU to call the Publish RPC + // concurrently. The default value was determined via extensive load + // testing (see the loadtest subdirectory). + if t.PublishSettings.NumGoroutines == 0 { + workers = 25 * runtime.GOMAXPROCS(0) + } + + t.scheduler = scheduler.NewPublishScheduler(workers, func(bundle interface{}) { // TODO(jba): use a context detached from the one passed to NewClient. 
ctx := context.TODO() if timeout != 0 { @@ -494,30 +632,42 @@ func (t *Topic) initBundler() { ctx, cancel = context.WithTimeout(ctx, timeout) defer cancel() } - t.publishMessageBundle(ctx, items.([]*bundledMessage)) + t.publishMessageBundle(ctx, bundle.([]*bundledMessage)) }) - t.bundler.DelayThreshold = t.PublishSettings.DelayThreshold - t.bundler.BundleCountThreshold = t.PublishSettings.CountThreshold - if t.bundler.BundleCountThreshold > MaxPublishRequestCount { - t.bundler.BundleCountThreshold = MaxPublishRequestCount + t.scheduler.DelayThreshold = t.PublishSettings.DelayThreshold + t.scheduler.BundleCountThreshold = t.PublishSettings.CountThreshold + if t.scheduler.BundleCountThreshold > MaxPublishRequestCount { + t.scheduler.BundleCountThreshold = MaxPublishRequestCount } - t.bundler.BundleByteThreshold = t.PublishSettings.ByteThreshold + t.scheduler.BundleByteThreshold = t.PublishSettings.ByteThreshold + + fcs := DefaultPublishSettings.FlowControlSettings + fcs.LimitExceededBehavior = t.PublishSettings.FlowControlSettings.LimitExceededBehavior + if t.PublishSettings.FlowControlSettings.MaxOutstandingBytes > 0 { + b := t.PublishSettings.FlowControlSettings.MaxOutstandingBytes + fcs.MaxOutstandingBytes = b + + // If MaxOutstandingBytes is set, disable BufferedByteLimit by setting it to maxint. + // This is because there's no way to set "unlimited" for BufferedByteLimit, + // and simply setting it to MaxOutstandingBytes occasionally leads to issues where + // BufferedByteLimit is reached even though there are resources available. + t.PublishSettings.BufferedByteLimit = maxInt + } + if t.PublishSettings.FlowControlSettings.MaxOutstandingMessages > 0 { + fcs.MaxOutstandingMessages = t.PublishSettings.FlowControlSettings.MaxOutstandingMessages + } + + t.flowController = newTopicFlowController(fcs) bufferedByteLimit := DefaultPublishSettings.BufferedByteLimit if t.PublishSettings.BufferedByteLimit > 0 { bufferedByteLimit = t.PublishSettings.BufferedByteLimit } - t.bundler.BufferedByteLimit = bufferedByteLimit + t.scheduler.BufferedByteLimit = bufferedByteLimit - // Set the bundler's max size per payload, accounting for topic name's overhead. - t.bundler.BundleByteLimit = MaxPublishRequestBytes - calcFieldSizeString(t.name) - // Unless overridden, allow many goroutines per CPU to call the Publish RPC concurrently. - // The default value was determined via extensive load testing (see the loadtest subdirectory). - if t.PublishSettings.NumGoroutines > 0 { - t.bundler.HandlerLimit = t.PublishSettings.NumGoroutines - } else { - t.bundler.HandlerLimit = 25 * runtime.GOMAXPROCS(0) - } + // Calculate the max limit of a single bundle. 5 comes from the number of bytes + // needed to be reserved for encoding the PubsubMessage repeated field. 
+ t.scheduler.BundleByteLimit = MaxPublishRequestBytes - calcFieldSizeString(t.name) - 5 } func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) { @@ -526,20 +676,38 @@ func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) log.Printf("pubsub: cannot create context with tag in publishMessageBundle: %v", err) } pbMsgs := make([]*pb.PubsubMessage, len(bms)) + var orderingKey string for i, bm := range bms { + orderingKey = bm.msg.OrderingKey pbMsgs[i] = &pb.PubsubMessage{ - Data: bm.msg.Data, - Attributes: bm.msg.Attributes, + Data: bm.msg.Data, + Attributes: bm.msg.Attributes, + OrderingKey: bm.msg.OrderingKey, } bm.msg = nil // release bm.msg for GC } + var res *pb.PublishResponse start := time.Now() - res, err := t.c.pubc.Publish(ctx, &pb.PublishRequest{ - Topic: t.name, - Messages: pbMsgs, - }, gax.WithGRPCOptions(grpc.MaxCallSendMsgSize(maxSendRecvBytes))) + if orderingKey != "" && t.scheduler.IsPaused(orderingKey) { + err = fmt.Errorf("pubsub: Publishing for ordering key, %s, paused due to previous error. Call topic.ResumePublish(orderingKey) before resuming publishing", orderingKey) + } else { + // Apply custom publish retryer on top of user specified retryer and + // default retryer. + opts := t.c.pubc.CallOptions.Publish + var settings gax.CallSettings + for _, opt := range opts { + opt.Resolve(&settings) + } + r := &publishRetryer{defaultRetryer: settings.Retry()} + res, err = t.c.pubc.Publish(ctx, &pb.PublishRequest{ + Topic: t.name, + Messages: pbMsgs, + }, gax.WithGRPCOptions(grpc.MaxCallSendMsgSize(maxSendRecvBytes)), + gax.WithRetry(func() gax.Retryer { return r })) + } end := time.Now() if err != nil { + t.scheduler.Pause(orderingKey) // Update context with error tag for OpenCensus, // using same stats.Record() call as success case. ctx, _ = tag.New(ctx, tag.Upsert(keyStatus, "ERROR"), @@ -549,10 +717,26 @@ func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) PublishLatency.M(float64(end.Sub(start)/time.Millisecond)), PublishedMessages.M(int64(len(bms)))) for i, bm := range bms { + t.flowController.release(ctx, bm.size) if err != nil { - bm.res.set("", err) + ipubsub.SetPublishResult(bm.res, "", err) } else { - bm.res.set(res.MessageIds[i], nil) + ipubsub.SetPublishResult(bm.res, res.MessageIds[i], nil) } } } + +// ResumePublish resumes accepting messages for the provided ordering key. +// Publishing using an ordering key might be paused if an error is +// encountered while publishing, to prevent messages from being published +// out of order. +func (t *Topic) ResumePublish(orderingKey string) { + t.mu.RLock() + noop := t.scheduler == nil + t.mu.RUnlock() + if noop { + return + } + + t.scheduler.Resume(orderingKey) +} diff --git a/vendor/cloud.google.com/go/pubsub/trace.go b/vendor/cloud.google.com/go/pubsub/trace.go index 4036a350f981a..cadc3eb6d50ae 100644 --- a/vendor/cloud.google.com/go/pubsub/trace.go +++ b/vendor/cloud.google.com/go/pubsub/trace.go @@ -19,20 +19,11 @@ import ( "log" "sync" - "go.opencensus.io/plugin/ocgrpc" "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" - "google.golang.org/api/option" - "google.golang.org/grpc" ) -func openCensusOptions() []option.ClientOption { - return []option.ClientOption{ - option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})), - } -} - // The following keys are used to tag requests with a specific topic/subscription ID. 
var ( keyTopic = tag.MustNewKey("topic") @@ -93,6 +84,22 @@ var ( // StreamResponseCount is a measure of the number of responses received on a streaming-pull stream. // It is EXPERIMENTAL and subject to change or removal without notice. StreamResponseCount = stats.Int64(statsPrefix+"stream_response_count", "Number of gRPC StreamingPull response messages received", stats.UnitDimensionless) + + // OutstandingMessages is a measure of the number of outstanding messages held by the client before they are processed. + // It is EXPERIMENTAL and subject to change or removal without notice. + OutstandingMessages = stats.Int64(statsPrefix+"outstanding_messages", "Number of outstanding Pub/Sub messages", stats.UnitDimensionless) + + // OutstandingBytes is a measure of the number of bytes all outstanding messages held by the client take up. + // It is EXPERIMENTAL and subject to change or removal without notice. + OutstandingBytes = stats.Int64(statsPrefix+"outstanding_bytes", "Number of outstanding bytes", stats.UnitDimensionless) + + // PublisherOutstandingMessages is a measure of the number of published outstanding messages held by the client before they are processed. + // It is EXPERIMENTAL and subject to change or removal without notice. + PublisherOutstandingMessages = stats.Int64(statsPrefix+"publisher_outstanding_messages", "Number of outstanding publish messages", stats.UnitDimensionless) + + // PublisherOutstandingBytes is a measure of the number of bytes all outstanding publish messages held by the client take up. + // It is EXPERIMENTAL and subject to change or removal without notice. + PublisherOutstandingBytes = stats.Int64(statsPrefix+"publisher_outstanding_bytes", "Number of outstanding publish bytes", stats.UnitDimensionless) ) var ( @@ -139,11 +146,29 @@ var ( // StreamResponseCountView is a cumulative sum of StreamResponseCount. // It is EXPERIMENTAL and subject to change or removal without notice. StreamResponseCountView *view.View + + // OutstandingMessagesView is the last value of OutstandingMessages + // It is EXPERIMENTAL and subject to change or removal without notice. + OutstandingMessagesView *view.View + + // OutstandingBytesView is the last value of OutstandingBytes + // It is EXPERIMENTAL and subject to change or removal without notice. + OutstandingBytesView *view.View + + // PublisherOutstandingMessagesView is the last value of OutstandingMessages + // It is EXPERIMENTAL and subject to change or removal without notice. + PublisherOutstandingMessagesView *view.View + + // PublisherOutstandingBytesView is the last value of OutstandingBytes + // It is EXPERIMENTAL and subject to change or removal without notice. 
+ PublisherOutstandingBytesView *view.View ) func init() { PublishedMessagesView = createCountView(stats.Measure(PublishedMessages), keyTopic, keyStatus, keyError) PublishLatencyView = createDistView(PublishLatency, keyTopic, keyStatus, keyError) + PublisherOutstandingMessagesView = createLastValueView(PublisherOutstandingMessages, keyTopic) + PublisherOutstandingBytesView = createLastValueView(PublisherOutstandingBytes, keyTopic) PullCountView = createCountView(PullCount, keySubscription) AckCountView = createCountView(AckCount, keySubscription) NackCountView = createCountView(NackCount, keySubscription) @@ -153,10 +178,14 @@ func init() { StreamRetryCountView = createCountView(StreamRetryCount, keySubscription) StreamRequestCountView = createCountView(StreamRequestCount, keySubscription) StreamResponseCountView = createCountView(StreamResponseCount, keySubscription) + OutstandingMessagesView = createLastValueView(OutstandingMessages, keySubscription) + OutstandingBytesView = createLastValueView(OutstandingBytes, keySubscription) DefaultPublishViews = []*view.View{ PublishedMessagesView, PublishLatencyView, + PublisherOutstandingMessagesView, + PublisherOutstandingBytesView, } DefaultSubscribeViews = []*view.View{ @@ -169,6 +198,8 @@ func init() { StreamRetryCountView, StreamRequestCountView, StreamResponseCountView, + OutstandingMessagesView, + OutstandingBytesView, } } @@ -199,6 +230,16 @@ func createDistView(m stats.Measure, keys ...tag.Key) *view.View { } } +func createLastValueView(m stats.Measure, keys ...tag.Key) *view.View { + return &view.View{ + Name: m.Name(), + Description: m.Description(), + TagKeys: keys, + Measure: m, + Aggregation: view.LastValue(), + } +} + var logOnce sync.Once // withSubscriptionKey returns a new context modified with the subscriptionKey tag map. 
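The vendored pubsub upgrade above (topic.go, trace.go) surfaces publisher features that did not exist in the old v1.3.1 copy: ordered publishing with `ResumePublish`, publisher flow control, `Topic.Flush`, topic-level retention, and the new OpenCensus publisher views. The sketch below is a minimal, illustrative use of those APIs as they appear in this diff; the project ID, topic ID, and ordering key are placeholders, not values from this repository.

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/pubsub"
	"go.opencensus.io/stats/view"
)

func main() {
	ctx := context.Background()

	// Placeholder project ID.
	client, err := pubsub.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Export the publisher views added in trace.go (outstanding messages/bytes).
	if err := view.Register(pubsub.DefaultPublishViews...); err != nil {
		log.Fatal(err)
	}

	// Topic-level message retention (TopicConfig.RetentionDuration in topic.go).
	topic, err := client.CreateTopicWithConfig(ctx, "my-topic", &pubsub.TopicConfig{
		RetentionDuration: 24 * time.Hour,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Publisher flow control and ordered publishing.
	topic.EnableMessageOrdering = true
	topic.PublishSettings.FlowControlSettings = pubsub.FlowControlSettings{
		MaxOutstandingMessages: 100,
		MaxOutstandingBytes:    10 * 1024 * 1024,
		LimitExceededBehavior:  pubsub.FlowControlBlock,
	}

	res := topic.Publish(ctx, &pubsub.Message{
		Data:        []byte("hello"),
		OrderingKey: "user-1",
	})
	if _, err := res.Get(ctx); err != nil {
		// A failed publish pauses the ordering key; resume it explicitly.
		topic.ResumePublish("user-1")
	}

	topic.Flush() // block until all buffered messages have been sent
	topic.Stop()
}
```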
diff --git a/vendor/cloud.google.com/go/release-please-config-individual.json b/vendor/cloud.google.com/go/release-please-config-individual.json new file mode 100644 index 0000000000000..b8766e0f2ea78 --- /dev/null +++ b/vendor/cloud.google.com/go/release-please-config-individual.json @@ -0,0 +1,45 @@ +{ + "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json", + "release-type": "go-yoshi", + "include-component-in-tag": true, + "separate-pull-requests": true, + "tag-separator": "/", + "packages": { + "bigquery": { + "component": "bigquery" + }, + "bigtable": { + "component": "bigtable" + }, + "datastore": { + "component": "datastore" + }, + "errorreporting": { + "component": "errorreporting" + }, + "firestore": { + "component": "firestore" + }, + "logging": { + "component": "logging" + }, + "profiler": { + "component": "profiler" + }, + "pubsub": { + "component": "pubsub" + }, + "pubsublite": { + "component": "pubsublite" + }, + "spanner": { + "component": "spanner" + }, + "storage": { + "component": "storage" + } + }, + "plugins": [ + "sentence-case" + ] +} \ No newline at end of file diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json new file mode 100644 index 0000000000000..a0b54cb5de009 --- /dev/null +++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json @@ -0,0 +1,347 @@ +{ + "release-type": "go-yoshi", + "include-component-in-tag": true, + "tag-separator": "/", + "packages": { + "accessapproval": { + "component": "accessapproval" + }, + "accesscontextmanager": { + "component": "accesscontextmanager" + }, + "aiplatform": { + "component": "aiplatform" + }, + "analytics": { + "component": "analytics" + }, + "apigateway": { + "component": "apigateway" + }, + "apigeeconnect": { + "component": "apigeeconnect" + }, + "apigeeregistry": { + "component": "apigeeregistry" + }, + "apikeys": { + "component": "apikeys" + }, + "appengine": { + "component": "appengine" + }, + "area120": { + "component": "area120" + }, + "artifactregistry": { + "component": "artifactregistry" + }, + "asset": { + "component": "asset" + }, + "assuredworkloads": { + "component": "assuredworkloads" + }, + "automl": { + "component": "automl" + }, + "baremetalsolution": { + "component": "baremetalsolution" + }, + "batch": { + "component": "batch" + }, + "beyondcorp": { + "component": "beyondcorp" + }, + "billing": { + "component": "billing" + }, + "binaryauthorization": { + "component": "binaryauthorization" + }, + "certificatemanager": { + "component": "certificatemanager" + }, + "channel": { + "component": "channel" + }, + "cloudbuild": { + "component": "cloudbuild" + }, + "clouddms": { + "component": "clouddms" + }, + "cloudtasks": { + "component": "cloudtasks" + }, + "compute": { + "component": "compute" + }, + "compute/metadata": { + "component": "compute/metadata" + }, + "contactcenterinsights": { + "component": "contactcenterinsights" + }, + "container": { + "component": "container" + }, + "containeranalysis": { + "component": "containeranalysis" + }, + "datacatalog": { + "component": "datacatalog" + }, + "dataflow": { + "component": "dataflow" + }, + "dataform": { + "component": "dataform" + }, + "datafusion": { + "component": "datafusion" + }, + "datalabeling": { + "component": "datalabeling" + }, + "dataplex": { + "component": "dataplex" + }, + "dataproc": { + "component": "dataproc" + }, + "dataqna": { + "component": "dataqna" + }, + 
"datastream": { + "component": "datastream" + }, + "deploy": { + "component": "deploy" + }, + "dialogflow": { + "component": "dialogflow" + }, + "dlp": { + "component": "dlp" + }, + "documentai": { + "component": "documentai" + }, + "domains": { + "component": "domains" + }, + "edgecontainer": { + "component": "edgecontainer" + }, + "essentialcontacts": { + "component": "essentialcontacts" + }, + "eventarc": { + "component": "eventarc" + }, + "filestore": { + "component": "filestore" + }, + "functions": { + "component": "functions" + }, + "gaming": { + "component": "gaming" + }, + "gkebackup": { + "component": "gkebackup" + }, + "gkeconnect": { + "component": "gkeconnect" + }, + "gkehub": { + "component": "gkehub" + }, + "gkemulticloud": { + "component": "gkemulticloud" + }, + "grafeas": { + "component": "grafeas" + }, + "gsuiteaddons": { + "component": "gsuiteaddons" + }, + "iam": { + "component": "iam" + }, + "iap": { + "component": "iap" + }, + "ids": { + "component": "ids" + }, + "iot": { + "component": "iot" + }, + "kms": { + "component": "kms" + }, + "language": { + "component": "language" + }, + "lifesciences": { + "component": "lifesciences" + }, + "longrunning": { + "component": "longrunning" + }, + "managedidentities": { + "component": "managedidentities" + }, + "maps": { + "component": "maps" + }, + "mediatranslation": { + "component": "mediatranslation" + }, + "memcache": { + "component": "memcache" + }, + "metastore": { + "component": "metastore" + }, + "monitoring": { + "component": "monitoring" + }, + "networkconnectivity": { + "component": "networkconnectivity" + }, + "networkmanagement": { + "component": "networkmanagement" + }, + "networksecurity": { + "component": "networksecurity" + }, + "notebooks": { + "component": "notebooks" + }, + "optimization": { + "component": "optimization" + }, + "orchestration": { + "component": "orchestration" + }, + "orgpolicy": { + "component": "orgpolicy" + }, + "osconfig": { + "component": "osconfig" + }, + "oslogin": { + "component": "oslogin" + }, + "phishingprotection": { + "component": "phishingprotection" + }, + "policytroubleshooter": { + "component": "policytroubleshooter" + }, + "privatecatalog": { + "component": "privatecatalog" + }, + "recaptchaenterprise/v2": { + "component": "recaptchaenterprise" + }, + "recommendationengine": { + "component": "recommendationengine" + }, + "recommender": { + "component": "recommender" + }, + "redis": { + "component": "redis" + }, + "resourcemanager": { + "component": "resourcemanager" + }, + "resourcesettings": { + "component": "resourcesettings" + }, + "retail": { + "component": "retail" + }, + "run": { + "component": "run" + }, + "scheduler": { + "component": "scheduler" + }, + "secretmanager": { + "component": "secretmanager" + }, + "security": { + "component": "security" + }, + "securitycenter": { + "component": "securitycenter" + }, + "servicecontrol": { + "component": "servicecontrol" + }, + "servicedirectory": { + "component": "servicedirectory" + }, + "servicemanagement": { + "component": "servicemanagement" + }, + "serviceusage": { + "component": "serviceusage" + }, + "shell": { + "component": "shell" + }, + "speech": { + "component": "speech" + }, + "storagetransfer": { + "component": "storagetransfer" + }, + "talent": { + "component": "talent" + }, + "texttospeech": { + "component": "texttospeech" + }, + "tpu": { + "component": "tpu" + }, + "trace": { + "component": "trace" + }, + "translate": { + "component": "translate" + }, + "video": { + "component": "video" + }, + 
"videointelligence": { + "component": "videointelligence" + }, + "vision/v2": { + "component": "vision" + }, + "vmmigration": { + "component": "vmmigration" + }, + "vpcaccess": { + "component": "vpcaccess" + }, + "webrisk": { + "component": "webrisk" + }, + "websecurityscanner": { + "component": "websecurityscanner" + }, + "workflows": { + "component": "workflows" + } + }, + "plugins": ["sentence-case"] +} diff --git a/vendor/cloud.google.com/go/release-please-config.json b/vendor/cloud.google.com/go/release-please-config.json new file mode 100644 index 0000000000000..1400245b8a3be --- /dev/null +++ b/vendor/cloud.google.com/go/release-please-config.json @@ -0,0 +1,11 @@ +{ + "release-type": "go-yoshi", + "separate-pull-requests": true, + "include-component-in-tag": false, + "packages": { + ".": { + "component": "main" + } + }, + "plugins": ["sentence-case"] +} diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index f6d57be5085db..3f4097faea130 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,5 +1,265 @@ # Changes + +## [1.29.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.1...storage/v1.29.0) (2023-01-19) + + +### Features + +* **storage:** Add ComponentCount as part of ObjectAttrs ([#7230](https://github.com/googleapis/google-cloud-go/issues/7230)) ([a19bca6](https://github.com/googleapis/google-cloud-go/commit/a19bca60704b4fbb674cf51d828580aa653c8210)) +* **storage:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) + + +### Documentation + +* **storage/internal:** Corrected typos and spellings ([7357077](https://github.com/googleapis/google-cloud-go/commit/735707796d81d7f6f32fc3415800c512fe62297e)) + +## [1.28.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.0...storage/v1.28.1) (2022-12-02) + + +### Bug Fixes + +* **storage:** downgrade some dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5)) + +## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.27.0...storage/v1.28.0) (2022-11-03) + + +### Features + +* **storage/internal:** Add routing annotations ([ce3f945](https://github.com/googleapis/google-cloud-go/commit/ce3f9458e511eca0910992763232abbcd64698f1)) +* **storage:** Add Autoclass support ([#6828](https://github.com/googleapis/google-cloud-go/issues/6828)) ([f7c7f41](https://github.com/googleapis/google-cloud-go/commit/f7c7f41e4d7fcffe05860e1114cb20f40c869da8)) + + +### Bug Fixes + +* **storage:** Fix read-write race in Writer.Write ([#6817](https://github.com/googleapis/google-cloud-go/issues/6817)) ([4766d3e](https://github.com/googleapis/google-cloud-go/commit/4766d3e1004119b93c6bd352024b5bf3404252eb)) +* **storage:** Fix request token passing for Copier.Run ([#6863](https://github.com/googleapis/google-cloud-go/issues/6863)) ([faaab06](https://github.com/googleapis/google-cloud-go/commit/faaab066d8e509dc440bcbc87391557ecee7dbf2)), refs [#6857](https://github.com/googleapis/google-cloud-go/issues/6857) + + +### Documentation + +* **storage:** Update broken links for SignURL and PostPolicy ([#6779](https://github.com/googleapis/google-cloud-go/issues/6779)) ([776138b](https://github.com/googleapis/google-cloud-go/commit/776138bc06a1e5fd45acbf8f9d36e9dc6ce31dd3)) + +## 
[1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22) + + +### Features + +* **storage:** Find GoogleAccessID when using impersonated creds ([#6591](https://github.com/googleapis/google-cloud-go/issues/6591)) ([a2d16a7](https://github.com/googleapis/google-cloud-go/commit/a2d16a7a778c85d13217fc67955ec5dac1da34e8)) + +## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29) + + +### Features + +* **storage:** export ShouldRetry ([#6370](https://github.com/googleapis/google-cloud-go/issues/6370)) ([0da9ab0](https://github.com/googleapis/google-cloud-go/commit/0da9ab0831540569dc04c0a23437b084b1564e15)), refs [#6362](https://github.com/googleapis/google-cloud-go/issues/6362) + + +### Bug Fixes + +* **storage:** allow to use age=0 in OLM conditions ([#6204](https://github.com/googleapis/google-cloud-go/issues/6204)) ([c85704f](https://github.com/googleapis/google-cloud-go/commit/c85704f4284626ce728cb48f3b130f2ce2a0165e)) + +## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11) + + +### Features + +* **storage/internal:** Add routing annotations ([8a8ba85](https://github.com/googleapis/google-cloud-go/commit/8a8ba85311f85701c97fd7c10f1d88b738ce423f)) +* **storage:** refactor to use transport-agnostic interface ([#6465](https://github.com/googleapis/google-cloud-go/issues/6465)) ([d03c3e1](https://github.com/googleapis/google-cloud-go/commit/d03c3e15a79fe9afa1232d9c8bd4c484a9bb927e)) + +## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.23.0...storage/v1.24.0) (2022-07-20) + + +### Features + +* **storage:** add Custom Placement Config Dual Region Support ([#6294](https://github.com/googleapis/google-cloud-go/issues/6294)) ([5a8c607](https://github.com/googleapis/google-cloud-go/commit/5a8c607e3a9a3265887e27cb13f8943f3e3fa23d)) + +## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.1...storage/v1.23.0) (2022-06-23) + + +### Features + +* **storage:** add support for OLM Prefix/Suffix ([#5929](https://github.com/googleapis/google-cloud-go/issues/5929)) ([ec21d10](https://github.com/googleapis/google-cloud-go/commit/ec21d10d6d1b01aa97a52560319775041707690d)) +* **storage:** support AbortIncompleteMultipartUpload LifecycleAction ([#5812](https://github.com/googleapis/google-cloud-go/issues/5812)) ([fdec929](https://github.com/googleapis/google-cloud-go/commit/fdec929b9da6e01dda0ab3c72544d44d6bd82bd4)), refs [#5795](https://github.com/googleapis/google-cloud-go/issues/5795) + + +### Bug Fixes + +* **storage:** allow for Age *int64 type and int64 type ([#6230](https://github.com/googleapis/google-cloud-go/issues/6230)) ([cc7acb8](https://github.com/googleapis/google-cloud-go/commit/cc7acb8bffb31828e9e96d4834a65f9728494473)) + +### [1.22.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.0...storage/v1.22.1) (2022-05-19) + + +### Bug Fixes + +* **storage:** bump genproto, remove deadcode ([#6059](https://github.com/googleapis/google-cloud-go/issues/6059)) ([bb10f9f](https://github.com/googleapis/google-cloud-go/commit/bb10f9faca57dc3b987e0fb601090887b3507f07)) +* **storage:** remove field that no longer exists ([#6061](https://github.com/googleapis/google-cloud-go/issues/6061)) ([ee150cf](https://github.com/googleapis/google-cloud-go/commit/ee150cfd194463ddfcb59898cfb0237e47777973)) + +## 
[1.22.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.21.0...storage/v1.22.0) (2022-03-31) + + +### Features + +* **storage:** allow specifying includeTrailingDelimiter ([#5617](https://github.com/googleapis/google-cloud-go/issues/5617)) ([a34503b](https://github.com/googleapis/google-cloud-go/commit/a34503bc0f0b95399285e8db66976b227e3b0072)) +* **storage:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) + + +### Bug Fixes + +* **storage:** respect STORAGE_EMULATOR_HOST in signedURL ([#5673](https://github.com/googleapis/google-cloud-go/issues/5673)) ([1c249ae](https://github.com/googleapis/google-cloud-go/commit/1c249ae5b4980cf53fa74635943ca8bf6a96a341)) + +## [1.21.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.20.0...storage/v1.21.0) (2022-02-17) + + +### Features + +* **storage:** add better version metadata to calls ([#5507](https://github.com/googleapis/google-cloud-go/issues/5507)) ([13fe0bc](https://github.com/googleapis/google-cloud-go/commit/13fe0bc0d8acbffd46b59ab69b25449f1cbd6a88)), refs [#2749](https://github.com/googleapis/google-cloud-go/issues/2749) +* **storage:** add Writer.ChunkRetryDeadline ([#5482](https://github.com/googleapis/google-cloud-go/issues/5482)) ([498a746](https://github.com/googleapis/google-cloud-go/commit/498a746769fa43958b92af8875b927879947128e)) + +## [1.20.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.19.0...storage/v1.20.0) (2022-02-04) + + +### Features + +* **storage/internal:** Update definition of RewriteObjectRequest to bring to parity with JSON API support ([#5447](https://www.github.com/googleapis/google-cloud-go/issues/5447)) ([7d175ef](https://www.github.com/googleapis/google-cloud-go/commit/7d175ef12b7b3e75585427f5dd2aab4a175e92d6)) + +## [1.19.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.2...storage/v1.19.0) (2022-01-25) + + +### Features + +* **storage:** add fully configurable and idempotency-aware retry strategy ([#5384](https://www.github.com/googleapis/google-cloud-go/issues/5384), [#5185](https://www.github.com/googleapis/google-cloud-go/issues/5185), [#5170](https://www.github.com/googleapis/google-cloud-go/issues/5170), [#5223](https://www.github.com/googleapis/google-cloud-go/issues/5223), [#5221](https://www.github.com/googleapis/google-cloud-go/issues/5221), [#5193](https://www.github.com/googleapis/google-cloud-go/issues/5193), [#5159](https://www.github.com/googleapis/google-cloud-go/issues/5159), [#5165](https://www.github.com/googleapis/google-cloud-go/issues/5165), [#5166](https://www.github.com/googleapis/google-cloud-go/issues/5166), [#5210](https://www.github.com/googleapis/google-cloud-go/issues/5210), [#5172](https://www.github.com/googleapis/google-cloud-go/issues/5172), [#5314](https://www.github.com/googleapis/google-cloud-go/issues/5314)) + * This release contains changes to fully align this library's retry strategy + with best practices as described in the + Cloud Storage [docs](https://cloud.google.com/storage/docs/retry-strategy). + * The library will now retry only idempotent operations by default. This means + that for certain operations, including object upload, compose, rewrite, + update, and delete, requests will not be retried by default unless + [idempotency conditions](https://cloud.google.com/storage/docs/retry-strategy#idempotency) + for the request have been met. 
+ * The library now has methods to configure aspects of retry policy for + API calls, including which errors are retried, the timing of the + exponential backoff, and how idempotency is taken into account. + * If you wish to re-enable retries for a non-idempotent request, use the + [RetryAlways](https://pkg.go.dev/cloud.google.com/go/storage@main#RetryAlways) + policy. + * For full details on how to configure retries, see the + [package docs](https://pkg.go.dev/cloud.google.com/go/storage@main#hdr-Retrying_failed_requests) + and the + [Cloud Storage docs](https://cloud.google.com/storage/docs/retry-strategy) +* **storage:** GenerateSignedPostPolicyV4 can use existing creds to authenticate ([#5105](https://www.github.com/googleapis/google-cloud-go/issues/5105)) ([46489f4](https://www.github.com/googleapis/google-cloud-go/commit/46489f4c8a634068a3e7cf2fd5e5ca11b555c0a8)) +* **storage:** post policy can be signed with a fn that takes raw bytes ([#5079](https://www.github.com/googleapis/google-cloud-go/issues/5079)) ([25d1278](https://www.github.com/googleapis/google-cloud-go/commit/25d1278cab539fbfdd8563ed6b297e30d3fe555c)) +* **storage:** add rpo (turbo replication) support ([#5003](https://www.github.com/googleapis/google-cloud-go/issues/5003)) ([3bd5995](https://www.github.com/googleapis/google-cloud-go/commit/3bd59958e0c06d2655b67fcb5410668db3c52af0)) + +### Bug Fixes + +* **storage:** fix nil check in gRPC Reader ([#5376](https://www.github.com/googleapis/google-cloud-go/issues/5376)) ([5e7d722](https://www.github.com/googleapis/google-cloud-go/commit/5e7d722d18a62b28ba98169b3bdbb49401377264)) + +### [1.18.2](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.1...storage/v1.18.2) (2021-10-18) + + +### Bug Fixes + +* **storage:** upgrade genproto ([#4993](https://www.github.com/googleapis/google-cloud-go/issues/4993)) ([5ca462d](https://www.github.com/googleapis/google-cloud-go/commit/5ca462d99fe851b7cddfd70108798e2fa959bdfd)), refs [#4991](https://www.github.com/googleapis/google-cloud-go/issues/4991) + +### [1.18.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.0...storage/v1.18.1) (2021-10-14) + + +### Bug Fixes + +* **storage:** don't assume auth from a client option ([#4982](https://www.github.com/googleapis/google-cloud-go/issues/4982)) ([e17334d](https://www.github.com/googleapis/google-cloud-go/commit/e17334d1fe7645d89d14ae7148313498b984dfbb)) + +## [1.18.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.17.0...storage/v1.18.0) (2021-10-11) + + +### Features + +* **storage:** returned wrapped error for timeouts ([#4802](https://www.github.com/googleapis/google-cloud-go/issues/4802)) ([0e102a3](https://www.github.com/googleapis/google-cloud-go/commit/0e102a385dc67a06f6b444b3a93e6998428529be)), refs [#4197](https://www.github.com/googleapis/google-cloud-go/issues/4197) +* **storage:** SignedUrl can use existing creds to authenticate ([#4604](https://www.github.com/googleapis/google-cloud-go/issues/4604)) ([b824c89](https://www.github.com/googleapis/google-cloud-go/commit/b824c897e6941270747b612f6d36a8d6ae081315)) + + +### Bug Fixes + +* **storage:** update PAP to use inherited instead of unspecified ([#4909](https://www.github.com/googleapis/google-cloud-go/issues/4909)) ([dac26b1](https://www.github.com/googleapis/google-cloud-go/commit/dac26b1af2f2972f12775341173bcc5f982438b8)) + +## [1.17.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.1...storage/v1.17.0) (2021-09-28) + + +### 
Features + +* **storage:** add projectNumber field to bucketAttrs. ([#4805](https://www.github.com/googleapis/google-cloud-go/issues/4805)) ([07343af](https://www.github.com/googleapis/google-cloud-go/commit/07343afc15085b164cc41d202d13f9d46f5c0d02)) + + +### Bug Fixes + +* **storage:** align retry idempotency (part 1) ([#4715](https://www.github.com/googleapis/google-cloud-go/issues/4715)) ([ffa903e](https://www.github.com/googleapis/google-cloud-go/commit/ffa903eeec61aa3869e5220e2f09371127b5c393)) + +### [1.16.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.0...storage/v1.16.1) (2021-08-30) + + +### Bug Fixes + +* **storage/internal:** Update encryption_key fields to "bytes" type. fix: Improve date/times and field name clarity in lifecycle conditions. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) +* **storage:** accept emulator env var without scheme ([#4616](https://www.github.com/googleapis/google-cloud-go/issues/4616)) ([5f8cbb9](https://www.github.com/googleapis/google-cloud-go/commit/5f8cbb98070109e2a34409ac775ed63b94d37efd)) +* **storage:** preserve supplied endpoint's scheme ([#4609](https://www.github.com/googleapis/google-cloud-go/issues/4609)) ([ee2756f](https://www.github.com/googleapis/google-cloud-go/commit/ee2756fb0a335d591464a770c9fa4f8fe0ba2e01)) +* **storage:** remove unnecessary variable ([#4608](https://www.github.com/googleapis/google-cloud-go/issues/4608)) ([27fc784](https://www.github.com/googleapis/google-cloud-go/commit/27fc78456fb251652bdf5cdb493734a7e1e643e1)) +* **storage:** retry LockRetentionPolicy ([#4439](https://www.github.com/googleapis/google-cloud-go/issues/4439)) ([09879ea](https://www.github.com/googleapis/google-cloud-go/commit/09879ea80cb67f9bfd8fc9384b0fda335567cba9)), refs [#4437](https://www.github.com/googleapis/google-cloud-go/issues/4437) +* **storage:** revise Reader to send XML preconditions ([#4479](https://www.github.com/googleapis/google-cloud-go/issues/4479)) ([e36b29a](https://www.github.com/googleapis/google-cloud-go/commit/e36b29a3d43bce5c1c044f7daf6e1db00b0a49e0)), refs [#4470](https://www.github.com/googleapis/google-cloud-go/issues/4470) + +## [1.16.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.15.0...storage/v1.16.0) (2021-06-28) + + +### Features + +* **storage:** support PublicAccessPrevention ([#3608](https://www.github.com/googleapis/google-cloud-go/issues/3608)) ([99bc782](https://www.github.com/googleapis/google-cloud-go/commit/99bc782fb50a47602b45278384ef5d5b5da9263b)), refs [#3203](https://www.github.com/googleapis/google-cloud-go/issues/3203) + + +### Bug Fixes + +* **storage:** fix Writer.ChunkSize validation ([#4255](https://www.github.com/googleapis/google-cloud-go/issues/4255)) ([69c2e9d](https://www.github.com/googleapis/google-cloud-go/commit/69c2e9dc6303e1a004d3104a8178532fa738e742)), refs [#4167](https://www.github.com/googleapis/google-cloud-go/issues/4167) +* **storage:** try to reopen for failed Reads ([#4226](https://www.github.com/googleapis/google-cloud-go/issues/4226)) ([564102b](https://www.github.com/googleapis/google-cloud-go/commit/564102b335dbfb558bec8af883e5f898efb5dd10)), refs [#3040](https://www.github.com/googleapis/google-cloud-go/issues/3040) + +## [1.15.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.13.0...storage/v1.15.0) (2021-04-21) + + +### Features + +* **transport** Bump dependency on google.golang.org/api to pick up HTTP/2 + config updates (see 
[googleapis/google-api-go-client#882](https://github.com/googleapis/google-api-go-client/pull/882)). + +### Bug Fixes + +* **storage:** retry io.ErrUnexpectedEOF ([#3957](https://www.github.com/googleapis/google-cloud-go/issues/3957)) ([f6590cd](https://www.github.com/googleapis/google-cloud-go/commit/f6590cdc26c8479be5df48949fa59f879e0c24fc)) + + +## v1.14.0 + +- Updates to various dependencies. + +## [1.13.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.12.0...v1.13.0) (2021-02-03) + + +### Features + +* **storage:** add missing StorageClass in BucketAttrsToUpdate ([#3038](https://www.github.com/googleapis/google-cloud-go/issues/3038)) ([2fa1b72](https://www.github.com/googleapis/google-cloud-go/commit/2fa1b727f8a7b20aa62fe0990530744f6c109be0)) +* **storage:** add projection parameter for BucketHandle.Objects() ([#3549](https://www.github.com/googleapis/google-cloud-go/issues/3549)) ([9b9c3dc](https://www.github.com/googleapis/google-cloud-go/commit/9b9c3dce3ee10af5b6c4d070821bf47a861efd5b)) + + +### Bug Fixes + +* **storage:** fix endpoint selection logic ([#3172](https://www.github.com/googleapis/google-cloud-go/issues/3172)) ([99edf0d](https://www.github.com/googleapis/google-cloud-go/commit/99edf0d211a9e617f2586fbc83b6f9630da3c537)) + +## v1.12.0 +- V4 signed URL fixes: + - Fix encoding of spaces in query parameters. + - Add fields that were missing from PostPolicyV4 policy conditions. +- Fix Query to correctly list prefixes as well as objects when SetAttrSelection + is used. + +## v1.11.0 +- Add support for CustomTime and NoncurrentTime object lifecycle management + features. + ## v1.10.0 - Bump dependency on google.golang.org/api to capture changes to retry logic which will make retries on writes more resilient. diff --git a/vendor/cloud.google.com/go/storage/README.md b/vendor/cloud.google.com/go/storage/README.md index a2253c4bb5a90..b2f411210ca0b 100644 --- a/vendor/cloud.google.com/go/storage/README.md +++ b/vendor/cloud.google.com/go/storage/README.md @@ -1,9 +1,9 @@ -## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) +## Cloud Storage [![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/storage.svg)](https://pkg.go.dev/cloud.google.com/go/storage) - [About Cloud Storage](https://cloud.google.com/storage/) - [API documentation](https://cloud.google.com/storage/docs) -- [Go client documentation](https://godoc.org/cloud.google.com/go/storage) -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage) +- [Go client documentation](https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage) ### Example Usage @@ -25,8 +25,8 @@ if err != nil { log.Fatal(err) } defer rc.Close() -body, err := ioutil.ReadAll(rc) +body, err := io.ReadAll(rc) if err != nil { log.Fatal(err) } -``` \ No newline at end of file +``` diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go index 7855d110ad455..e0ab60073c2f4 100644 --- a/vendor/cloud.google.com/go/storage/acl.go +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -20,7 +20,7 @@ import ( "reflect" "cloud.google.com/go/internal/trace" - "google.golang.org/api/googleapi" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" raw "google.golang.org/api/storage/v1" ) @@ -66,12 +66,15 @@ type ProjectTeam struct { } // 
ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object. +// ACLHandle on an object operates on the latest generation of that object by default. +// Selecting a specific generation of an object is not currently supported by the client. type ACLHandle struct { c *Client bucket string object string isDefault bool userProject string // for requester-pays buckets + retry *retryConfig } // Delete permanently deletes the ACL entry for the given entity. @@ -117,114 +120,46 @@ func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) { } func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { - var acls *raw.ObjectAccessControls - var err error - err = runWithRetry(ctx, func() error { - req := a.c.raw.DefaultObjectAccessControls.List(a.bucket) - a.configureCall(ctx, req) - acls, err = req.Do() - return err - }) - if err != nil { - return nil, err - } - return toObjectACLRules(acls.Items), nil + opts := makeStorageOpts(true, a.retry, a.userProject) + return a.c.tc.ListDefaultObjectACLs(ctx, a.bucket, opts...) } func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { - return runWithRetry(ctx, func() error { - req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)) - a.configureCall(ctx, req) - return req.Do() - }) + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.DeleteDefaultObjectACL(ctx, a.bucket, entity, opts...) } func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { - var acls *raw.BucketAccessControls - var err error - err = runWithRetry(ctx, func() error { - req := a.c.raw.BucketAccessControls.List(a.bucket) - a.configureCall(ctx, req) - acls, err = req.Do() - return err - }) - if err != nil { - return nil, err - } - return toBucketACLRules(acls.Items), nil + opts := makeStorageOpts(true, a.retry, a.userProject) + return a.c.tc.ListBucketACLs(ctx, a.bucket, opts...) } func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { - acl := &raw.BucketAccessControl{ - Bucket: a.bucket, - Entity: string(entity), - Role: string(role), - } - err := runWithRetry(ctx, func() error { - req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl) - a.configureCall(ctx, req) - _, err := req.Do() - return err - }) - if err != nil { - return err - } - return nil + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.UpdateBucketACL(ctx, a.bucket, entity, role, opts...) } func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { - return runWithRetry(ctx, func() error { - req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)) - a.configureCall(ctx, req) - return req.Do() - }) + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.DeleteBucketACL(ctx, a.bucket, entity, opts...) } func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { - var acls *raw.ObjectAccessControls - var err error - err = runWithRetry(ctx, func() error { - req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object) - a.configureCall(ctx, req) - acls, err = req.Do() - return err - }) - if err != nil { - return nil, err - } - return toObjectACLRules(acls.Items), nil + opts := makeStorageOpts(true, a.retry, a.userProject) + return a.c.tc.ListObjectACLs(ctx, a.bucket, a.object, opts...) 
} func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error { - type setRequest interface { - Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) - Header() http.Header - } - - acl := &raw.ObjectAccessControl{ - Bucket: a.bucket, - Entity: string(entity), - Role: string(role), - } - var req setRequest + opts := makeStorageOpts(false, a.retry, a.userProject) if isBucketDefault { - req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl) - } else { - req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl) + return a.c.tc.UpdateDefaultObjectACL(ctx, a.bucket, entity, role, opts...) } - a.configureCall(ctx, req) - return runWithRetry(ctx, func() error { - _, err := req.Do() - return err - }) + return a.c.tc.UpdateObjectACL(ctx, a.bucket, a.object, entity, role, opts...) } func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { - return runWithRetry(ctx, func() error { - req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)) - a.configureCall(ctx, req) - return req.Do() - }) + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...) } func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) { @@ -244,6 +179,14 @@ func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule { return rs } +func toObjectACLRulesFromProto(items []*storagepb.ObjectAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toObjectACLRuleFromProto(item)) + } + return rs +} + func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule { var rs []ACLRule for _, item := range items { @@ -252,6 +195,14 @@ func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule { return rs } +func toBucketACLRulesFromProto(items []*storagepb.BucketAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toBucketACLRuleFromProto(item)) + } + return rs +} + func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule { return ACLRule{ Entity: ACLEntity(a.Entity), @@ -263,6 +214,17 @@ func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule { } } +func toObjectACLRuleFromProto(a *storagepb.ObjectAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.GetEntity()), + EntityID: a.GetEntityId(), + Role: ACLRole(a.GetRole()), + Domain: a.GetDomain(), + Email: a.GetEmail(), + ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()), + } +} + func toBucketACLRule(a *raw.BucketAccessControl) ACLRule { return ACLRule{ Entity: ACLEntity(a.Entity), @@ -274,6 +236,17 @@ func toBucketACLRule(a *raw.BucketAccessControl) ACLRule { } } +func toBucketACLRuleFromProto(a *storagepb.BucketAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.GetEntity()), + EntityID: a.GetEntityId(), + Role: ACLRole(a.GetRole()), + Domain: a.GetDomain(), + Email: a.GetEmail(), + ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()), + } +} + func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl { if len(rules) == 0 { return nil @@ -285,6 +258,17 @@ func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl { return r } +func toProtoObjectACL(rules []ACLRule) []*storagepb.ObjectAccessControl { + if len(rules) == 0 { + return nil + } + r := make([]*storagepb.ObjectAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toProtoObjectAccessControl("")) // 
bucket name unnecessary + } + return r +} + func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl { if len(rules) == 0 { return nil @@ -296,6 +280,17 @@ func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl { return r } +func toProtoBucketACL(rules []ACLRule) []*storagepb.BucketAccessControl { + if len(rules) == 0 { + return nil + } + r := make([]*storagepb.BucketAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toProtoBucketAccessControl()) + } + return r +} + func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl { return &raw.BucketAccessControl{ Bucket: bucket, @@ -314,6 +309,22 @@ func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessContro } } +func (r ACLRule) toProtoObjectAccessControl(bucket string) *storagepb.ObjectAccessControl { + return &storagepb.ObjectAccessControl{ + Entity: string(r.Entity), + Role: string(r.Role), + // The other fields are not settable. + } +} + +func (r ACLRule) toProtoBucketAccessControl() *storagepb.BucketAccessControl { + return &storagepb.BucketAccessControl{ + Entity: string(r.Entity), + Role: string(r.Role), + // The other fields are not settable. + } +} + func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam { if p == nil { return nil @@ -324,6 +335,16 @@ func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam { } } +func toProjectTeamFromProto(p *storagepb.ProjectTeam) *ProjectTeam { + if p == nil { + return nil + } + return &ProjectTeam{ + ProjectNumber: p.GetProjectNumber(), + Team: p.GetTeam(), + } +} + func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam { if p == nil { return nil diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 478482645fabe..19f266ef1e35b 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -16,16 +16,26 @@ package storage import ( "context" + "encoding/base64" + "encoding/json" + "errors" "fmt" - "net/http" "reflect" + "strings" "time" + "cloud.google.com/go/compute/metadata" "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" "google.golang.org/api/googleapi" + "google.golang.org/api/iamcredentials/v1" "google.golang.org/api/iterator" + "google.golang.org/api/option" raw "google.golang.org/api/storage/v1" + dpb "google.golang.org/genproto/googleapis/type/date" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" ) // BucketHandle provides operations on a Google Cloud Storage bucket. @@ -37,6 +47,7 @@ type BucketHandle struct { defaultObjectACL ACLHandle conds *BucketConditions userProject string // project for Requester Pays buckets + retry *retryConfig } // Bucket returns a BucketHandle, which provides operations on the named bucket. @@ -45,20 +56,25 @@ type BucketHandle struct { // The supplied name must contain only lowercase letters, numbers, dashes, // underscores, and dots. 
The full specification for valid bucket names can be // found at: -// https://cloud.google.com/storage/docs/bucket-naming +// +// https://cloud.google.com/storage/docs/bucket-naming func (c *Client) Bucket(name string) *BucketHandle { + retry := c.retry.clone() return &BucketHandle{ c: c, name: name, acl: ACLHandle{ c: c, bucket: name, + retry: retry, }, defaultObjectACL: ACLHandle{ c: c, bucket: name, isDefault: true, + retry: retry, }, + retry: retry, } } @@ -68,27 +84,11 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") defer func() { trace.EndSpan(ctx, err) }() - var bkt *raw.Bucket - if attrs != nil { - bkt = attrs.toRawBucket() - } else { - bkt = &raw.Bucket{} - } - bkt.Name = b.name - // If there is lifecycle information but no location, explicitly set - // the location. This is a GCS quirk/bug. - if bkt.Location == "" && bkt.Lifecycle != nil { - bkt.Location = "US" - } - req := b.c.raw.Buckets.Insert(projectID, bkt) - setClientHeader(req.Header()) - if attrs != nil && attrs.PredefinedACL != "" { - req.PredefinedAcl(attrs.PredefinedACL) - } - if attrs != nil && attrs.PredefinedDefaultObjectACL != "" { - req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) + o := makeStorageOpts(true, b.retry, b.userProject) + if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, o...); err != nil { + return err } - return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err }) + return nil } // Delete deletes the Bucket. @@ -96,23 +96,8 @@ func (b *BucketHandle) Delete(ctx context.Context) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete") defer func() { trace.EndSpan(ctx, err) }() - req, err := b.newDeleteCall() - if err != nil { - return err - } - return runWithRetry(ctx, func() error { return req.Context(ctx).Do() }) -} - -func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) { - req := b.c.raw.Buckets.Delete(b.name) - setClientHeader(req.Header()) - if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil { - return nil, err - } - if b.userProject != "" { - req.UserProject(b.userProject) - } - return req, nil + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...) } // ACL returns an ACLHandle, which provides access to the bucket's access control list. @@ -130,12 +115,15 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle { } // Object returns an ObjectHandle, which provides operations on the named object. -// This call does not perform any network operations. +// This call does not perform any network operations such as fetching the object or verifying its existence. +// Use methods on ObjectHandle to perform network operations. // // name must consist entirely of valid UTF-8-encoded runes. 
The full specification // for valid object names can be found at: -// https://cloud.google.com/storage/docs/bucket-naming +// +// https://cloud.google.com/storage/docs/naming-objects func (b *BucketHandle) Object(name string) *ObjectHandle { + retry := b.retry.clone() return &ObjectHandle{ c: b.c, bucket: b.name, @@ -145,9 +133,11 @@ func (b *BucketHandle) Object(name string) *ObjectHandle { bucket: b.name, object: name, userProject: b.userProject, + retry: retry, }, gen: -1, userProject: b.userProject, + retry: retry, } } @@ -156,34 +146,8 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs") defer func() { trace.EndSpan(ctx, err) }() - req, err := b.newGetCall() - if err != nil { - return nil, err - } - var resp *raw.Bucket - err = runWithRetry(ctx, func() error { - resp, err = req.Context(ctx).Do() - return err - }) - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrBucketNotExist - } - if err != nil { - return nil, err - } - return newBucket(resp) -} - -func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) { - req := b.c.raw.Buckets.Get(b.name).Projection("full") - setClientHeader(req.Header()) - if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil { - return nil, err - } - if b.userProject != "" { - req.UserProject(b.userProject) - } - return req, nil + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.GetBucket(ctx, b.name, b.conds, o...) } // Update updates a bucket's attributes. @@ -191,35 +155,165 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) ( ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") defer func() { trace.EndSpan(ctx, err) }() - req, err := b.newPatchCall(&uattrs) - if err != nil { - return nil, err + isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0 + o := makeStorageOpts(isIdempotent, b.retry, b.userProject) + return b.c.tc.UpdateBucket(ctx, b.name, &uattrs, b.conds, o...) +} + +// SignedURL returns a URL for the specified object. Signed URLs allow anyone +// access to a restricted resource for a limited time without needing a Google +// account or signing in. +// For more information about signed URLs, see "[Overview of access control]." +// +// This method requires the Method and Expires fields in the specified +// SignedURLOptions to be non-nil. You may need to set the GoogleAccessID and +// PrivateKey fields in some cases. Read more on the [automatic detection of credentials] +// for this method. +// +// [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication +// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing +func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) { + if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) { + return SignedURL(b.name, object, opts) } - if uattrs.PredefinedACL != "" { - req.PredefinedAcl(uattrs.PredefinedACL) + // Make a copy of opts so we don't modify the pointer parameter. 
+ newopts := opts.clone() + + if newopts.GoogleAccessID == "" { + id, err := b.detectDefaultGoogleAccessID() + if err != nil { + return "", err + } + newopts.GoogleAccessID = id + } + if newopts.SignBytes == nil && len(newopts.PrivateKey) == 0 { + if b.c.creds != nil && len(b.c.creds.JSON) > 0 { + var sa struct { + PrivateKey string `json:"private_key"` + } + err := json.Unmarshal(b.c.creds.JSON, &sa) + if err == nil && sa.PrivateKey != "" { + newopts.PrivateKey = []byte(sa.PrivateKey) + } + } + + // Don't error out if we can't unmarshal the private key from the client, + // fallback to the default sign function for the service account. + if len(newopts.PrivateKey) == 0 { + newopts.SignBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID) + } } - if uattrs.PredefinedDefaultObjectACL != "" { - req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL) + return SignedURL(b.name, object, newopts) +} + +// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. +// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. +// +// This method requires the Expires field in the specified PostPolicyV4Options +// to be non-nil. You may need to set the GoogleAccessID and PrivateKey fields +// in some cases. Read more on the [automatic detection of credentials] for this method. +// +// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing +func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { + if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) { + return GenerateSignedPostPolicyV4(b.name, object, opts) } - // TODO(jba): retry iff metagen is set? - rb, err := req.Context(ctx).Do() - if err != nil { - return nil, err + // Make a copy of opts so we don't modify the pointer parameter. + newopts := opts.clone() + + if newopts.GoogleAccessID == "" { + id, err := b.detectDefaultGoogleAccessID() + if err != nil { + return nil, err + } + newopts.GoogleAccessID = id + } + if newopts.SignBytes == nil && newopts.SignRawBytes == nil && len(newopts.PrivateKey) == 0 { + if b.c.creds != nil && len(b.c.creds.JSON) > 0 { + var sa struct { + PrivateKey string `json:"private_key"` + } + err := json.Unmarshal(b.c.creds.JSON, &sa) + if err == nil && sa.PrivateKey != "" { + newopts.PrivateKey = []byte(sa.PrivateKey) + } + } + + // Don't error out if we can't unmarshal the private key from the client, + // fallback to the default sign function for the service account. 
+ if len(newopts.PrivateKey) == 0 { + newopts.SignRawBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID) + } } - return newBucket(rb) + return GenerateSignedPostPolicyV4(b.name, object, newopts) } -func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) { - rb := uattrs.toRawBucket() - req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full") - setClientHeader(req.Header()) - if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil { - return nil, err +func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) { + returnErr := errors.New("no credentials found on client and not on GCE (Google Compute Engine)") + + if b.c.creds != nil && len(b.c.creds.JSON) > 0 { + var sa struct { + ClientEmail string `json:"client_email"` + SAImpersonationURL string `json:"service_account_impersonation_url"` + CredType string `json:"type"` + } + + err := json.Unmarshal(b.c.creds.JSON, &sa) + if err != nil { + returnErr = err + } else if sa.CredType == "impersonated_service_account" { + start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":") + + if end <= start { + returnErr = errors.New("error parsing impersonated service account credentials") + } else { + return sa.SAImpersonationURL[start+1 : end], nil + } + } else if sa.CredType == "service_account" && sa.ClientEmail != "" { + return sa.ClientEmail, nil + } else { + returnErr = errors.New("unable to parse credentials; only service_account and impersonated_service_account credentials are supported") + } } - if b.userProject != "" { - req.UserProject(b.userProject) + + // Don't error out if we can't unmarshal, fallback to GCE check. + if metadata.OnGCE() { + email, err := metadata.Email("default") + if err == nil && email != "" { + return email, nil + } else if err != nil { + returnErr = err + } else { + returnErr = errors.New("empty email from GCE metadata service") + } + + } + return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4])", returnErr) +} + +func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) { + return func(in []byte) ([]byte, error) { + ctx := context.Background() + + // It's ok to recreate this service per call since we pass in the http client, + // circumventing the cost of recreating the auth/transport layer + svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(b.c.hc)) + if err != nil { + return nil, fmt.Errorf("unable to create iamcredentials client: %w", err) + } + + resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{ + Payload: base64.StdEncoding.EncodeToString(in), + }).Do() + if err != nil { + return nil, fmt.Errorf("unable to sign bytes: %w", err) + } + out, err := base64.StdEncoding.DecodeString(resp.SignedBlob) + if err != nil { + return nil, fmt.Errorf("unable to base64 decode response: %w", err) + } + return out, nil } - return req, nil } // BucketAttrs represents the metadata for a Google Cloud Storage bucket. @@ -244,6 +338,13 @@ type BucketAttrs struct { // for more information. 
UniformBucketLevelAccess UniformBucketLevelAccess + // PublicAccessPrevention is the setting for the bucket's + // PublicAccessPrevention policy, which can be used to prevent public access + // of data in the bucket. See + // https://cloud.google.com/storage/docs/public-access-prevention for more + // information. + PublicAccessPrevention PublicAccessPrevention + // DefaultObjectACL is the list of access controls to // apply to new objects when no object ACL is provided. DefaultObjectACL []ACLRule @@ -267,8 +368,13 @@ type BucketAttrs struct { PredefinedDefaultObjectACL string // Location is the location of the bucket. It defaults to "US". + // If specifying a dual-region, CustomPlacementConfig should be set in conjunction. Location string + // The bucket's custom placement configuration that holds a list of + // regional locations for custom dual regions. + CustomPlacementConfig *CustomPlacementConfig + // MetaGeneration is the metadata generation of the bucket. // This field is read-only. MetaGeneration int64 @@ -329,6 +435,21 @@ type BucketAttrs struct { // Typical values are "multi-region", "region" and "dual-region". // This field is read-only. LocationType string + + // The project number of the project the bucket belongs to. + // This field is read-only. + ProjectNumber uint64 + + // RPO configures the Recovery Point Objective (RPO) policy of the bucket. + // Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket. + // See https://cloud.google.com/storage/docs/managing-turbo-replication for + // more information. + RPO RPO + + // Autoclass holds the bucket's autoclass configuration. If enabled, + // allows for the automatic selection of the best storage class + // based on object access patterns. + Autoclass *Autoclass } // BucketPolicyOnly is an alias for UniformBucketLevelAccess. @@ -353,6 +474,47 @@ type UniformBucketLevelAccess struct { LockedTime time.Time } +// PublicAccessPrevention configures the Public Access Prevention feature, which +// can be used to disallow public access to any data in a bucket. See +// https://cloud.google.com/storage/docs/public-access-prevention for more +// information. +type PublicAccessPrevention int + +const ( + // PublicAccessPreventionUnknown is a zero value, used only if this field is + // not set in a call to GCS. + PublicAccessPreventionUnknown PublicAccessPrevention = iota + + // PublicAccessPreventionUnspecified corresponds to a value of "unspecified". + // Deprecated: use PublicAccessPreventionInherited + PublicAccessPreventionUnspecified + + // PublicAccessPreventionEnforced corresponds to a value of "enforced". This + // enforces Public Access Prevention on the bucket. + PublicAccessPreventionEnforced + + // PublicAccessPreventionInherited corresponds to a value of "inherited" + // and is the default for buckets. + PublicAccessPreventionInherited + + publicAccessPreventionUnknown string = "" + // TODO: remove unspecified when change is fully completed + publicAccessPreventionUnspecified = "unspecified" + publicAccessPreventionEnforced = "enforced" + publicAccessPreventionInherited = "inherited" +) + +func (p PublicAccessPrevention) String() string { + switch p { + case PublicAccessPreventionInherited, PublicAccessPreventionUnspecified: + return publicAccessPreventionInherited + case PublicAccessPreventionEnforced: + return publicAccessPreventionEnforced + default: + return publicAccessPreventionUnknown + } +} + // Lifecycle is the lifecycle configuration for objects in the bucket. 
type Lifecycle struct { Rules []LifecycleRule @@ -389,7 +551,8 @@ type RetentionPolicy struct { } const ( - // RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule. + // RFC3339 timestamp with only the date segment, used for CreatedBefore, + // CustomTimeBefore, and NoncurrentTimeBefore in LifecycleRule. rfc3339Date = "2006-01-02" // DeleteAction is a lifecycle action that deletes a live and/or archived @@ -399,6 +562,13 @@ const ( // SetStorageClassAction changes the storage class of live and/or archived // objects. SetStorageClassAction = "SetStorageClass" + + // AbortIncompleteMPUAction is a lifecycle action that aborts an incomplete + // multipart upload when the multipart upload meets the conditions specified + // in the lifecycle rule. The AgeInDays condition is the only allowed + // condition for this action. AgeInDays is measured from the time the + // multipart upload was created. + AbortIncompleteMPUAction = "AbortIncompleteMultipartUpload" ) // LifecycleRule is a lifecycle configuration rule. @@ -419,9 +589,8 @@ type LifecycleRule struct { type LifecycleAction struct { // Type is the type of action to take on matching objects. // - // Acceptable values are "Delete" to delete matching objects and - // "SetStorageClass" to set the storage class defined in StorageClass on - // matching objects. + // Acceptable values are storage.DeleteAction, storage.SetStorageClassAction, + // and storage.AbortIncompleteMPUAction. Type string // StorageClass is the storage class to set on matching objects if the Action @@ -446,7 +615,12 @@ const ( // // All configured conditions must be met for the associated action to be taken. type LifecycleCondition struct { + // AllObjects is used to select all objects in a bucket by + // setting AgeInDays to 0. + AllObjects bool + // AgeInDays is the age of the object in days. + // If you want to set AgeInDays to `0` use AllObjects set to `true`. AgeInDays int64 // CreatedBefore is the time the object was created. @@ -455,20 +629,53 @@ type LifecycleCondition struct { // the specified date in UTC. CreatedBefore time.Time + // CustomTimeBefore is the CustomTime metadata field of the object. This + // condition is satisfied when an object's CustomTime timestamp is before + // midnight of the specified date in UTC. + // + // This condition can only be satisfied if CustomTime has been set. + CustomTimeBefore time.Time + + // DaysSinceCustomTime is the days elapsed since the CustomTime date of the + // object. This condition can only be satisfied if CustomTime has been set. + // Note: Using `0` as the value will be ignored by the library and not sent to the API. + DaysSinceCustomTime int64 + + // DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp + // of the object. This condition is relevant only for versioned objects. + // Note: Using `0` as the value will be ignored by the library and not sent to the API. + DaysSinceNoncurrentTime int64 + // Liveness specifies the object's liveness. Relevant only for versioned objects Liveness Liveness + // MatchesPrefix is the condition matching an object if any of the + // matches_prefix strings are an exact prefix of the object's name. + MatchesPrefix []string + // MatchesStorageClasses is the condition matching the object's storage // class. // // Values include "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". 
MatchesStorageClasses []string + // MatchesSuffix is the condition matching an object if any of the + // matches_suffix strings are an exact suffix of the object's name. + MatchesSuffix []string + + // NoncurrentTimeBefore is the noncurrent timestamp of the object. This + // condition is satisfied when an object's noncurrent timestamp is before + // midnight of the specified date in UTC. + // + // This condition is relevant only for versioned objects. + NoncurrentTimeBefore time.Time + // NumNewerVersions is the condition matching objects with a number of newer versions. // // If the value is N, this condition is satisfied when there are at least N // versions (including the live version) newer than this version of the // object. + // Note: Using `0` as the value will be ignored by the library and not sent to the API. NumNewerVersions int64 } @@ -500,6 +707,29 @@ type BucketWebsite struct { NotFoundPage string } +// CustomPlacementConfig holds the bucket's custom placement +// configuration for Custom Dual Regions. See +// https://cloud.google.com/storage/docs/locations#location-dr for more information. +type CustomPlacementConfig struct { + // The list of regional locations in which data is placed. + // Custom Dual Regions require exactly 2 regional locations. + DataLocations []string +} + +// Autoclass holds the bucket's autoclass configuration. If enabled, +// allows for the automatic selection of the best storage class +// based on object access patterns. See +// https://cloud.google.com/storage/docs/using-autoclass for more information. +type Autoclass struct { + // Enabled specifies whether the autoclass feature is enabled + // on the bucket. + Enabled bool + // ToggleTime is the time from which Autoclass was last toggled. + // If Autoclass is enabled when the bucket is created, the ToggleTime + // is set to the bucket creation time. This field is read-only. 
+ ToggleTime time.Time +} + func newBucket(b *raw.Bucket) (*BucketAttrs, error) { if b == nil { return nil, nil @@ -528,11 +758,49 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) { Website: toBucketWebsite(b.Website), BucketPolicyOnly: toBucketPolicyOnly(b.IamConfiguration), UniformBucketLevelAccess: toUniformBucketLevelAccess(b.IamConfiguration), + PublicAccessPrevention: toPublicAccessPrevention(b.IamConfiguration), Etag: b.Etag, LocationType: b.LocationType, + ProjectNumber: b.ProjectNumber, + RPO: toRPO(b), + CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig), + Autoclass: toAutoclassFromRaw(b.Autoclass), }, nil } +func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs { + if b == nil { + return nil + } + return &BucketAttrs{ + Name: parseBucketName(b.GetName()), + Location: b.GetLocation(), + MetaGeneration: b.GetMetageneration(), + DefaultEventBasedHold: b.GetDefaultEventBasedHold(), + StorageClass: b.GetStorageClass(), + Created: b.GetCreateTime().AsTime(), + VersioningEnabled: b.GetVersioning().GetEnabled(), + ACL: toBucketACLRulesFromProto(b.GetAcl()), + DefaultObjectACL: toObjectACLRulesFromProto(b.GetDefaultObjectAcl()), + Labels: b.GetLabels(), + RequesterPays: b.GetBilling().GetRequesterPays(), + Lifecycle: toLifecycleFromProto(b.GetLifecycle()), + RetentionPolicy: toRetentionPolicyFromProto(b.GetRetentionPolicy()), + CORS: toCORSFromProto(b.GetCors()), + Encryption: toBucketEncryptionFromProto(b.GetEncryption()), + Logging: toBucketLoggingFromProto(b.GetLogging()), + Website: toBucketWebsiteFromProto(b.GetWebsite()), + BucketPolicyOnly: toBucketPolicyOnlyFromProto(b.GetIamConfig()), + UniformBucketLevelAccess: toUniformBucketLevelAccessFromProto(b.GetIamConfig()), + PublicAccessPrevention: toPublicAccessPreventionFromProto(b.GetIamConfig()), + LocationType: b.GetLocationType(), + RPO: toRPOFromProto(b), + CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()), + ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 the project resource name is ID based + Autoclass: toAutoclassFromProto(b.GetAutoclass()), + } +} + // toRawBucket copies the editable attribute from b to the raw library's Bucket type. func (b *BucketAttrs) toRawBucket() *raw.Bucket { // Copy label map. 
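The bucket attributes introduced above (PublicAccessPrevention, Autoclass, lifecycle actions such as AbortIncompleteMultipartUpload) surface directly on storage.BucketAttrs. A minimal creation sketch, assuming an authenticated *storage.Client named client and a real project ID; the bucket name and values are illustrative only:

import (
	"context"

	"cloud.google.com/go/storage"
)

func createExampleBucket(ctx context.Context, client *storage.Client, projectID string) error {
	attrs := &storage.BucketAttrs{
		Location:               "US",
		PublicAccessPrevention: storage.PublicAccessPreventionEnforced,
		Autoclass:              &storage.Autoclass{Enabled: true},
		Lifecycle: storage.Lifecycle{Rules: []storage.LifecycleRule{{
			// Abort incomplete multipart uploads older than 7 days; AgeInDays
			// is the only condition allowed for this action.
			Action:    storage.LifecycleAction{Type: storage.AbortIncompleteMPUAction},
			Condition: storage.LifecycleCondition{AgeInDays: 7},
		}}},
	}
	// Create issues the underlying CreateBucket call with these attributes.
	return client.Bucket("example-bucket-name").Create(ctx, projectID, attrs)
}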
@@ -555,29 +823,179 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket { bb = &raw.BucketBilling{RequesterPays: true} } var bktIAM *raw.BucketIamConfiguration - if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { - bktIAM = &raw.BucketIamConfiguration{ - UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM = &raw.BucketIamConfiguration{} + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { + bktIAM.UniformBucketLevelAccess = &raw.BucketIamConfigurationUniformBucketLevelAccess{ Enabled: true, - }, + } + } + if b.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String() } } return &raw.Bucket{ - Name: b.Name, - Location: b.Location, - StorageClass: b.StorageClass, - Acl: toRawBucketACL(b.ACL), - DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL), - Versioning: v, - Labels: labels, - Billing: bb, - Lifecycle: toRawLifecycle(b.Lifecycle), - RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(), - Cors: toRawCORS(b.CORS), - Encryption: b.Encryption.toRawBucketEncryption(), - Logging: b.Logging.toRawBucketLogging(), - Website: b.Website.toRawBucketWebsite(), - IamConfiguration: bktIAM, + Name: b.Name, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: toRawBucketACL(b.ACL), + DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL), + Versioning: v, + Labels: labels, + Billing: bb, + Lifecycle: toRawLifecycle(b.Lifecycle), + RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(), + Cors: toRawCORS(b.CORS), + Encryption: b.Encryption.toRawBucketEncryption(), + Logging: b.Logging.toRawBucketLogging(), + Website: b.Website.toRawBucketWebsite(), + IamConfiguration: bktIAM, + Rpo: b.RPO.String(), + CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(), + Autoclass: b.Autoclass.toRawAutoclass(), + } +} + +func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket { + if b == nil { + return &storagepb.Bucket{} + } + + // Copy label map. + var labels map[string]string + if len(b.Labels) > 0 { + labels = make(map[string]string, len(b.Labels)) + for k, v := range b.Labels { + labels[k] = v + } + } + + // Ignore VersioningEnabled if it is false. This is OK because + // we only call this method when creating a bucket, and by default + // new buckets have versioning off. 
+ var v *storagepb.Bucket_Versioning + if b.VersioningEnabled { + v = &storagepb.Bucket_Versioning{Enabled: true} + } + var bb *storagepb.Bucket_Billing + if b.RequesterPays { + bb = &storagepb.Bucket_Billing{RequesterPays: true} + } + var bktIAM *storagepb.Bucket_IamConfig + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM = &storagepb.Bucket_IamConfig{} + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { + bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ + Enabled: true, + } + } + if b.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String() + } + } + + return &storagepb.Bucket{ + Name: b.Name, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: toProtoBucketACL(b.ACL), + DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL), + Versioning: v, + Labels: labels, + Billing: bb, + Lifecycle: toProtoLifecycle(b.Lifecycle), + RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(), + Cors: toProtoCORS(b.CORS), + Encryption: b.Encryption.toProtoBucketEncryption(), + Logging: b.Logging.toProtoBucketLogging(), + Website: b.Website.toProtoBucketWebsite(), + IamConfig: bktIAM, + Rpo: b.RPO.String(), + CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(), + Autoclass: b.Autoclass.toProtoAutoclass(), + } +} + +func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { + if ua == nil { + return &storagepb.Bucket{} + } + + // TODO(cathyo): Handle labels. Pending b/230510191. + + var v *storagepb.Bucket_Versioning + if ua.VersioningEnabled != nil { + v = &storagepb.Bucket_Versioning{Enabled: optional.ToBool(ua.VersioningEnabled)} + } + var bb *storagepb.Bucket_Billing + if ua.RequesterPays != nil { + bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)} + } + + var bktIAM *storagepb.Bucket_IamConfig + if ua.UniformBucketLevelAccess != nil || ua.BucketPolicyOnly != nil || ua.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM = &storagepb.Bucket_IamConfig{} + + if ua.BucketPolicyOnly != nil { + bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ + Enabled: optional.ToBool(ua.BucketPolicyOnly.Enabled), + } + } + + if ua.UniformBucketLevelAccess != nil { + // UniformBucketLevelAccess takes precedence over BucketPolicyOnly, + // so Enabled will be overriden here if both are set + bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ + Enabled: optional.ToBool(ua.UniformBucketLevelAccess.Enabled), + } + } + + if ua.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String() + } + } + + var defaultHold bool + if ua.DefaultEventBasedHold != nil { + defaultHold = optional.ToBool(ua.DefaultEventBasedHold) + } + var lifecycle Lifecycle + if ua.Lifecycle != nil { + lifecycle = *ua.Lifecycle + } + var bktACL []*storagepb.BucketAccessControl + if ua.acl != nil { + bktACL = toProtoBucketACL(ua.acl) + } + if ua.PredefinedACL != "" { + // Clear ACL or the call will fail. + bktACL = nil + } + var bktDefaultObjectACL []*storagepb.ObjectAccessControl + if ua.defaultObjectACL != nil { + bktDefaultObjectACL = toProtoObjectACL(ua.defaultObjectACL) + } + if ua.PredefinedDefaultObjectACL != "" { + // Clear ACLs or the call will fail. 
+ bktDefaultObjectACL = nil + } + + return &storagepb.Bucket{ + StorageClass: ua.StorageClass, + Acl: bktACL, + DefaultObjectAcl: bktDefaultObjectACL, + DefaultEventBasedHold: defaultHold, + Versioning: v, + Billing: bb, + Lifecycle: toProtoLifecycle(lifecycle), + RetentionPolicy: ua.RetentionPolicy.toProtoRetentionPolicy(), + Cors: toProtoCORS(ua.CORS), + Encryption: ua.Encryption.toProtoBucketEncryption(), + Logging: ua.Logging.toProtoBucketLogging(), + Website: ua.Website.toProtoBucketWebsite(), + IamConfig: bktIAM, + Rpo: ua.RPO.String(), + Autoclass: ua.Autoclass.toProtoAutoclass(), } } @@ -638,6 +1056,21 @@ type BucketAttrsToUpdate struct { // for more information. UniformBucketLevelAccess *UniformBucketLevelAccess + // PublicAccessPrevention is the setting for the bucket's + // PublicAccessPrevention policy, which can be used to prevent public access + // of data in the bucket. See + // https://cloud.google.com/storage/docs/public-access-prevention for more + // information. + PublicAccessPrevention PublicAccessPrevention + + // StorageClass is the default storage class of the bucket. This defines + // how objects in the bucket are stored and determines the SLA + // and the cost of storage. Typical values are "STANDARD", "NEARLINE", + // "COLDLINE" and "ARCHIVE". Defaults to "STANDARD". + // See https://cloud.google.com/storage/docs/storage-classes for all + // valid values. + StorageClass string + // If set, updates the retention policy of the bucket. Using // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy. // @@ -672,6 +1105,27 @@ type BucketAttrsToUpdate struct { // See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch. PredefinedDefaultObjectACL string + // RPO configures the Recovery Point Objective (RPO) policy of the bucket. + // Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket. + // See https://cloud.google.com/storage/docs/managing-turbo-replication for + // more information. + RPO RPO + + // If set, updates the autoclass configuration of the bucket. + // See https://cloud.google.com/storage/docs/using-autoclass for more information. + Autoclass *Autoclass + + // acl is the list of access control rules on the bucket. + // It is unexported and only used internally by the gRPC client. + // Library users should use ACLHandle methods directly. + acl []ACLRule + + // defaultObjectACL is the list of access controls to + // apply to new objects when no object ACL is provided. + // It is unexported and only used internally by the gRPC client. + // Library users should use ACLHandle methods directly. + defaultObjectACL []ACLRule + setLabels map[string]string deleteLabels map[string]bool } @@ -740,6 +1194,12 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { }, } } + if ua.PublicAccessPrevention != PublicAccessPreventionUnknown { + if rb.IamConfiguration == nil { + rb.IamConfiguration = &raw.BucketIamConfiguration{} + } + rb.IamConfiguration.PublicAccessPrevention = ua.PublicAccessPrevention.String() + } if ua.Encryption != nil { if ua.Encryption.DefaultKMSKeyName == "" { rb.NullFields = append(rb.NullFields, "Encryption") @@ -768,6 +1228,12 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { rb.Website = ua.Website.toRawBucketWebsite() } } + if ua.Autoclass != nil { + rb.Autoclass = &raw.BucketAutoclass{ + Enabled: ua.Autoclass.Enabled, + ForceSendFields: []string{"Enabled"}, + } + } if ua.PredefinedACL != "" { // Clear ACL or the call will fail. 
rb.Acl = nil @@ -778,6 +1244,10 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { rb.DefaultObjectAcl = nil rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl") } + + rb.StorageClass = ua.StorageClass + rb.Rpo = ua.RPO.String() + if ua.setLabels != nil || ua.deleteLabels != nil { rb.Labels = map[string]string{} for k, v := range ua.setLabels { @@ -852,13 +1322,8 @@ func (b *BucketHandle) UserProject(projectID string) *BucketHandle { // most customers. It might be changed in backwards-incompatible ways and is not // subject to any SLA or deprecation policy. func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error { - var metageneration int64 - if b.conds != nil { - metageneration = b.conds.MetagenerationMatch - } - req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration) - _, err := req.Context(ctx).Do() - return err + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...) } // applyBucketConds modifies the provided call using the conditions in conds. @@ -884,6 +1349,32 @@ func applyBucketConds(method string, conds *BucketConditions, call interface{}) return nil } +// applyBucketConds modifies the provided request message using the conditions +// in conds. msg is a protobuf Message that has fields if_metageneration_match +// and if_metageneration_not_match. +func applyBucketCondsProto(method string, conds *BucketConditions, msg proto.Message) error { + rmsg := msg.ProtoReflect() + + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + + switch { + case conds.MetagenerationMatch != 0: + if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy { if rp == nil { return nil @@ -893,8 +1384,23 @@ func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy { } } -func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) { +func (rp *RetentionPolicy) toProtoRetentionPolicy() *storagepb.Bucket_RetentionPolicy { if rp == nil { + return nil + } + // RetentionPeriod must be greater than 0, so if it is 0, the user left it + // unset, and so we should not send it in the request i.e. nil is sent. 
+ var dur *durationpb.Duration + if rp.RetentionPeriod != 0 { + dur = durationpb.New(rp.RetentionPeriod) + } + return &storagepb.Bucket_RetentionPolicy{ + RetentionDuration: dur, + } +} + +func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) { + if rp == nil || rp.EffectiveTime == "" { return nil, nil } t, err := time.Parse(time.RFC3339, rp.EffectiveTime) @@ -908,6 +1414,17 @@ func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) }, nil } +func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *RetentionPolicy { + if rp == nil || rp.GetEffectiveTime().AsTime().Unix() == 0 { + return nil + } + return &RetentionPolicy{ + RetentionPeriod: rp.GetRetentionDuration().AsDuration(), + EffectiveTime: rp.GetEffectiveTime().AsTime(), + IsLocked: rp.GetIsLocked(), + } +} + func toRawCORS(c []CORS) []*raw.BucketCors { var out []*raw.BucketCors for _, v := range c { @@ -921,6 +1438,19 @@ func toRawCORS(c []CORS) []*raw.BucketCors { return out } +func toProtoCORS(c []CORS) []*storagepb.Bucket_Cors { + var out []*storagepb.Bucket_Cors + for _, v := range c { + out = append(out, &storagepb.Bucket_Cors{ + MaxAgeSeconds: int32(v.MaxAge / time.Second), + Method: v.Methods, + Origin: v.Origins, + ResponseHeader: v.ResponseHeaders, + }) + } + return out +} + func toCORS(rc []*raw.BucketCors) []CORS { var out []CORS for _, v := range rc { @@ -934,6 +1464,19 @@ func toCORS(rc []*raw.BucketCors) []CORS { return out } +func toCORSFromProto(rc []*storagepb.Bucket_Cors) []CORS { + var out []CORS + for _, v := range rc { + out = append(out, CORS{ + MaxAge: time.Duration(v.GetMaxAgeSeconds()) * time.Second, + Methods: v.GetMethod(), + Origins: v.GetOrigin(), + ResponseHeaders: v.GetResponseHeader(), + }) + } + return out +} + func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { var rl raw.BucketLifecycle if len(l.Rules) == 0 { @@ -946,12 +1489,25 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { StorageClass: r.Action.StorageClass, }, Condition: &raw.BucketLifecycleRuleCondition{ - Age: r.Condition.AgeInDays, - MatchesStorageClass: r.Condition.MatchesStorageClasses, - NumNewerVersions: r.Condition.NumNewerVersions, + DaysSinceCustomTime: r.Condition.DaysSinceCustomTime, + DaysSinceNoncurrentTime: r.Condition.DaysSinceNoncurrentTime, + MatchesPrefix: r.Condition.MatchesPrefix, + MatchesStorageClass: r.Condition.MatchesStorageClasses, + MatchesSuffix: r.Condition.MatchesSuffix, + NumNewerVersions: r.Condition.NumNewerVersions, }, } + // AllObjects takes precedent when both AllObjects and AgeInDays are set + // Rationale: If you've opted into using AllObjects, it makes sense that you + // understand the implications of how this option works with AgeInDays. 
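+	// For illustration only (hypothetical rule values): a rule targeting every
+	// object regardless of age can be expressed client-side as
+	//
+	//	storage.LifecycleRule{
+	//		Action:    storage.LifecycleAction{Type: storage.DeleteAction},
+	//		Condition: storage.LifecycleCondition{AllObjects: true},
+	//	}
+	//
+	// The branch below then sends Age = 0 with a ForceSendFields entry, whereas
+	// AgeInDays of 0 without AllObjects omits the Age condition entirely.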
+ if r.Condition.AllObjects { + rr.Condition.Age = googleapi.Int64(0) + rr.Condition.ForceSendFields = []string{"Age"} + } else if r.Condition.AgeInDays > 0 { + rr.Condition.Age = googleapi.Int64(r.Condition.AgeInDays) + } + switch r.Condition.Liveness { case LiveAndArchived: rr.Condition.IsLive = nil @@ -964,6 +1520,64 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { if !r.Condition.CreatedBefore.IsZero() { rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date) } + if !r.Condition.CustomTimeBefore.IsZero() { + rr.Condition.CustomTimeBefore = r.Condition.CustomTimeBefore.Format(rfc3339Date) + } + if !r.Condition.NoncurrentTimeBefore.IsZero() { + rr.Condition.NoncurrentTimeBefore = r.Condition.NoncurrentTimeBefore.Format(rfc3339Date) + } + rl.Rule = append(rl.Rule, rr) + } + return &rl +} + +func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle { + var rl storagepb.Bucket_Lifecycle + + for _, r := range l.Rules { + rr := &storagepb.Bucket_Lifecycle_Rule{ + Action: &storagepb.Bucket_Lifecycle_Rule_Action{ + Type: r.Action.Type, + StorageClass: r.Action.StorageClass, + }, + Condition: &storagepb.Bucket_Lifecycle_Rule_Condition{ + // Note: The Apiary types use int64 (even though the Discovery + // doc states "format: int32"), so the client types used int64, + // but the proto uses int32 so we have a potentially lossy + // conversion. + AgeDays: proto.Int32(int32(r.Condition.AgeInDays)), + DaysSinceCustomTime: proto.Int32(int32(r.Condition.DaysSinceCustomTime)), + DaysSinceNoncurrentTime: proto.Int32(int32(r.Condition.DaysSinceNoncurrentTime)), + MatchesPrefix: r.Condition.MatchesPrefix, + MatchesStorageClass: r.Condition.MatchesStorageClasses, + MatchesSuffix: r.Condition.MatchesSuffix, + NumNewerVersions: proto.Int32(int32(r.Condition.NumNewerVersions)), + }, + } + + // TODO(#6205): This may not be needed for gRPC + if r.Condition.AllObjects { + rr.Condition.AgeDays = proto.Int32(0) + } + + switch r.Condition.Liveness { + case LiveAndArchived: + rr.Condition.IsLive = nil + case Live: + rr.Condition.IsLive = proto.Bool(true) + case Archived: + rr.Condition.IsLive = proto.Bool(false) + } + + if !r.Condition.CreatedBefore.IsZero() { + rr.Condition.CreatedBefore = timeToProtoDate(r.Condition.CreatedBefore) + } + if !r.Condition.CustomTimeBefore.IsZero() { + rr.Condition.CustomTimeBefore = timeToProtoDate(r.Condition.CustomTimeBefore) + } + if !r.Condition.NoncurrentTimeBefore.IsZero() { + rr.Condition.NoncurrentTimeBefore = timeToProtoDate(r.Condition.NoncurrentTimeBefore) + } rl.Rule = append(rl.Rule, rr) } return &rl @@ -981,11 +1595,20 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { StorageClass: rr.Action.StorageClass, }, Condition: LifecycleCondition{ - AgeInDays: rr.Condition.Age, - MatchesStorageClasses: rr.Condition.MatchesStorageClass, - NumNewerVersions: rr.Condition.NumNewerVersions, + DaysSinceCustomTime: rr.Condition.DaysSinceCustomTime, + DaysSinceNoncurrentTime: rr.Condition.DaysSinceNoncurrentTime, + MatchesPrefix: rr.Condition.MatchesPrefix, + MatchesStorageClasses: rr.Condition.MatchesStorageClass, + MatchesSuffix: rr.Condition.MatchesSuffix, + NumNewerVersions: rr.Condition.NumNewerVersions, }, } + if rr.Condition.Age != nil { + r.Condition.AgeInDays = *rr.Condition.Age + if *rr.Condition.Age == 0 { + r.Condition.AllObjects = true + } + } if rr.Condition.IsLive == nil { r.Condition.Liveness = LiveAndArchived @@ -998,6 +1621,61 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { if rr.Condition.CreatedBefore != "" { 
r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore) } + if rr.Condition.CustomTimeBefore != "" { + r.Condition.CustomTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.CustomTimeBefore) + } + if rr.Condition.NoncurrentTimeBefore != "" { + r.Condition.NoncurrentTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.NoncurrentTimeBefore) + } + l.Rules = append(l.Rules, r) + } + return l +} + +func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle { + var l Lifecycle + if rl == nil { + return l + } + for _, rr := range rl.GetRule() { + r := LifecycleRule{ + Action: LifecycleAction{ + Type: rr.GetAction().GetType(), + StorageClass: rr.GetAction().GetStorageClass(), + }, + Condition: LifecycleCondition{ + AgeInDays: int64(rr.GetCondition().GetAgeDays()), + DaysSinceCustomTime: int64(rr.GetCondition().GetDaysSinceCustomTime()), + DaysSinceNoncurrentTime: int64(rr.GetCondition().GetDaysSinceNoncurrentTime()), + MatchesPrefix: rr.GetCondition().GetMatchesPrefix(), + MatchesStorageClasses: rr.GetCondition().GetMatchesStorageClass(), + MatchesSuffix: rr.GetCondition().GetMatchesSuffix(), + NumNewerVersions: int64(rr.GetCondition().GetNumNewerVersions()), + }, + } + + // TODO(#6205): This may not be needed for gRPC + if rr.GetCondition().GetAgeDays() == 0 { + r.Condition.AllObjects = true + } + + if rr.GetCondition().IsLive == nil { + r.Condition.Liveness = LiveAndArchived + } else if rr.GetCondition().GetIsLive() { + r.Condition.Liveness = Live + } else { + r.Condition.Liveness = Archived + } + + if rr.GetCondition().GetCreatedBefore() != nil { + r.Condition.CreatedBefore = protoDateToUTCTime(rr.GetCondition().GetCreatedBefore()) + } + if rr.GetCondition().GetCustomTimeBefore() != nil { + r.Condition.CustomTimeBefore = protoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore()) + } + if rr.GetCondition().GetNoncurrentTimeBefore() != nil { + r.Condition.NoncurrentTimeBefore = protoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore()) + } l.Rules = append(l.Rules, r) } return l @@ -1012,6 +1690,15 @@ func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption { } } +func (e *BucketEncryption) toProtoBucketEncryption() *storagepb.Bucket_Encryption { + if e == nil { + return nil + } + return &storagepb.Bucket_Encryption{ + DefaultKmsKey: e.DefaultKMSKeyName, + } +} + func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption { if e == nil { return nil @@ -1019,6 +1706,13 @@ func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption { return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName} } +func toBucketEncryptionFromProto(e *storagepb.Bucket_Encryption) *BucketEncryption { + if e == nil { + return nil + } + return &BucketEncryption{DefaultKMSKeyName: e.GetDefaultKmsKey()} +} + func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging { if b == nil { return nil @@ -1029,6 +1723,16 @@ func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging { } } +func (b *BucketLogging) toProtoBucketLogging() *storagepb.Bucket_Logging { + if b == nil { + return nil + } + return &storagepb.Bucket_Logging{ + LogBucket: bucketResourceName(globalProjectAlias, b.LogBucket), + LogObjectPrefix: b.LogObjectPrefix, + } +} + func toBucketLogging(b *raw.BucketLogging) *BucketLogging { if b == nil { return nil @@ -1039,6 +1743,17 @@ func toBucketLogging(b *raw.BucketLogging) *BucketLogging { } } +func toBucketLoggingFromProto(b *storagepb.Bucket_Logging) *BucketLogging { + if b == nil { + return nil + } + lb := 
parseBucketName(b.GetLogBucket()) + return &BucketLogging{ + LogBucket: lb, + LogObjectPrefix: b.GetLogObjectPrefix(), + } +} + func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite { if w == nil { return nil @@ -1049,6 +1764,16 @@ func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite { } } +func (w *BucketWebsite) toProtoBucketWebsite() *storagepb.Bucket_Website { + if w == nil { + return nil + } + return &storagepb.Bucket_Website{ + MainPageSuffix: w.MainPageSuffix, + NotFoundPage: w.NotFoundPage, + } +} + func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite { if w == nil { return nil @@ -1059,6 +1784,16 @@ func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite { } } +func toBucketWebsiteFromProto(w *storagepb.Bucket_Website) *BucketWebsite { + if w == nil { + return nil + } + return &BucketWebsite{ + MainPageSuffix: w.GetMainPageSuffix(), + NotFoundPage: w.GetNotFoundPage(), + } +} + func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly { if b == nil || b.BucketPolicyOnly == nil || !b.BucketPolicyOnly.Enabled { return BucketPolicyOnly{} @@ -1075,6 +1810,16 @@ func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly { } } +func toBucketPolicyOnlyFromProto(b *storagepb.Bucket_IamConfig) BucketPolicyOnly { + if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() { + return BucketPolicyOnly{} + } + return BucketPolicyOnly{ + Enabled: true, + LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(), + } +} + func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLevelAccess { if b == nil || b.UniformBucketLevelAccess == nil || !b.UniformBucketLevelAccess.Enabled { return UniformBucketLevelAccess{} @@ -1091,23 +1836,186 @@ func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLeve } } -// Objects returns an iterator over the objects in the bucket that match the Query q. -// If q is nil, no filtering is done. 
+func toUniformBucketLevelAccessFromProto(b *storagepb.Bucket_IamConfig) UniformBucketLevelAccess { + if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() { + return UniformBucketLevelAccess{} + } + return UniformBucketLevelAccess{ + Enabled: true, + LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(), + } +} + +func toPublicAccessPrevention(b *raw.BucketIamConfiguration) PublicAccessPrevention { + if b == nil { + return PublicAccessPreventionUnknown + } + switch b.PublicAccessPrevention { + case publicAccessPreventionInherited, publicAccessPreventionUnspecified: + return PublicAccessPreventionInherited + case publicAccessPreventionEnforced: + return PublicAccessPreventionEnforced + default: + return PublicAccessPreventionUnknown + } +} + +func toPublicAccessPreventionFromProto(b *storagepb.Bucket_IamConfig) PublicAccessPrevention { + if b == nil { + return PublicAccessPreventionUnknown + } + switch b.GetPublicAccessPrevention() { + case publicAccessPreventionInherited, publicAccessPreventionUnspecified: + return PublicAccessPreventionInherited + case publicAccessPreventionEnforced: + return PublicAccessPreventionEnforced + default: + return PublicAccessPreventionUnknown + } +} + +func toRPO(b *raw.Bucket) RPO { + if b == nil { + return RPOUnknown + } + switch b.Rpo { + case rpoDefault: + return RPODefault + case rpoAsyncTurbo: + return RPOAsyncTurbo + default: + return RPOUnknown + } +} + +func toRPOFromProto(b *storagepb.Bucket) RPO { + if b == nil { + return RPOUnknown + } + switch b.GetRpo() { + case rpoDefault: + return RPODefault + case rpoAsyncTurbo: + return RPOAsyncTurbo + default: + return RPOUnknown + } +} + +func customPlacementFromRaw(c *raw.BucketCustomPlacementConfig) *CustomPlacementConfig { + if c == nil { + return nil + } + return &CustomPlacementConfig{DataLocations: c.DataLocations} +} + +func (c *CustomPlacementConfig) toRawCustomPlacement() *raw.BucketCustomPlacementConfig { + if c == nil { + return nil + } + return &raw.BucketCustomPlacementConfig{ + DataLocations: c.DataLocations, + } +} + +func (c *CustomPlacementConfig) toProtoCustomPlacement() *storagepb.Bucket_CustomPlacementConfig { + if c == nil { + return nil + } + return &storagepb.Bucket_CustomPlacementConfig{ + DataLocations: c.DataLocations, + } +} + +func customPlacementFromProto(c *storagepb.Bucket_CustomPlacementConfig) *CustomPlacementConfig { + if c == nil { + return nil + } + return &CustomPlacementConfig{DataLocations: c.GetDataLocations()} +} + +func (a *Autoclass) toRawAutoclass() *raw.BucketAutoclass { + if a == nil { + return nil + } + // Excluding read only field ToggleTime. + return &raw.BucketAutoclass{ + Enabled: a.Enabled, + } +} + +func (a *Autoclass) toProtoAutoclass() *storagepb.Bucket_Autoclass { + if a == nil { + return nil + } + // Excluding read only field ToggleTime. + return &storagepb.Bucket_Autoclass{ + Enabled: a.Enabled, + } +} + +func toAutoclassFromRaw(a *raw.BucketAutoclass) *Autoclass { + if a == nil || a.ToggleTime == "" { + return nil + } + // Return Autoclass.ToggleTime only if parsed with a valid value. 
+ t, err := time.Parse(time.RFC3339, a.ToggleTime) + if err != nil { + return &Autoclass{ + Enabled: a.Enabled, + } + } + return &Autoclass{ + Enabled: a.Enabled, + ToggleTime: t, + } +} + +func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { + if a == nil || a.GetToggleTime().AsTime().Unix() == 0 { + return nil + } + return &Autoclass{ + Enabled: a.GetEnabled(), + ToggleTime: a.GetToggleTime().AsTime(), + } +} + +// Objects returns an iterator over the objects in the bucket that match the +// Query q. If q is nil, no filtering is done. Objects will be iterated over +// lexicographically by name. // // Note: The returned iterator is not safe for concurrent operations without explicit synchronization. func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { - it := &ObjectIterator{ - ctx: ctx, - bucket: b, + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.ListObjects(ctx, b.name, q, o...) +} + +// Retryer returns a bucket handle that is configured with custom retry +// behavior as specified by the options that are passed to it. All operations +// on the new handle will use the customized retry configuration. +// Retry options set on a object handle will take precedence over options set on +// the bucket handle. +// These retry options will merge with the client's retry configuration (if set) +// for the returned handle. Options passed into this method will take precedence +// over retry options on the client. Note that you must explicitly pass in each +// option you want to override. +func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle { + b2 := *b + var retry *retryConfig + if b.retry != nil { + // merge the options with the existing retry + retry = b.retry + } else { + retry = &retryConfig{} } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - it.fetch, - func() int { return len(it.items) }, - func() interface{} { b := it.items; it.items = nil; return b }) - if q != nil { - it.query = *q + for _, opt := range opts { + opt.apply(retry) } - return it + b2.retry = retry + b2.acl.retry = retry + b2.defaultObjectACL.retry = retry + return &b2 } // An ObjectIterator is an iterator over ObjectAttrs. @@ -1115,7 +2023,6 @@ func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { // Note: This iterator is not safe for concurrent operations without explicit synchronization. type ObjectIterator struct { ctx context.Context - bucket *BucketHandle query Query pageInfo *iterator.PageInfo nextFunc func() error @@ -1131,6 +2038,13 @@ func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // there are no more results. Once Next returns iterator.Done, all subsequent // calls will return iterator.Done. // +// In addition, if Next returns an error other than iterator.Done, all +// subsequent calls will return the same error. To continue iteration, a new +// `ObjectIterator` must be created. Since objects are ordered lexicographically +// by name, `Query.StartOffset` can be used to create a new iterator which will +// start at the desired place. See +// https://pkg.go.dev/cloud.google.com/go/storage?tab=doc#hdr-Listing_objects. +// // If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will // have a non-empty Prefix field, and a zero value for all other fields. These // represent prefixes. 
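The iteration contract described above (lexicographic order, iterator.Done as the terminal sentinel, Query.StartOffset for resuming) looks like this in practice. A sketch assuming an existing *storage.BucketHandle named bkt; the prefix value is illustrative:

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func listWithPrefix(ctx context.Context, bkt *storage.BucketHandle, prefix string) error {
	it := bkt.Objects(ctx, &storage.Query{Prefix: prefix})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// All subsequent Next calls will return this same error; to resume,
			// build a new iterator with Query.StartOffset set to the last name seen.
			return err
		}
		fmt.Println(attrs.Name)
	}
	return nil
}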
@@ -1145,44 +2059,6 @@ func (it *ObjectIterator) Next() (*ObjectAttrs, error) { return item, nil } -func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { - req := it.bucket.c.raw.Objects.List(it.bucket.name) - setClientHeader(req.Header()) - req.Projection("full") - req.Delimiter(it.query.Delimiter) - req.Prefix(it.query.Prefix) - req.Versions(it.query.Versions) - if len(it.query.fieldSelection) > 0 { - req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection)) - } - req.PageToken(pageToken) - if it.bucket.userProject != "" { - req.UserProject(it.bucket.userProject) - } - if pageSize > 0 { - req.MaxResults(int64(pageSize)) - } - var resp *raw.Objects - var err error - err = runWithRetry(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() - return err - }) - if err != nil { - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - err = ErrBucketNotExist - } - return "", err - } - for _, item := range resp.Items { - it.items = append(it.items, newObject(item)) - } - for _, prefix := range resp.Prefixes { - it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) - } - return resp.NextPageToken, nil -} - // Buckets returns an iterator over the buckets in the project. You may // optionally set the iterator's Prefix field to restrict the list to buckets // whose names begin with the prefix. By default, all buckets in the project @@ -1190,17 +2066,8 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) // // Note: The returned iterator is not safe for concurrent operations without explicit synchronization. func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { - it := &BucketIterator{ - ctx: ctx, - client: c, - projectID: projectID, - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - it.fetch, - func() int { return len(it.buckets) }, - func() interface{} { b := it.buckets; it.buckets = nil; return b }) - - return it + o := makeStorageOpts(true, c.retry, "") + return c.tc.ListBuckets(ctx, projectID, o...) } // A BucketIterator is an iterator over BucketAttrs. @@ -1211,7 +2078,6 @@ type BucketIterator struct { Prefix string ctx context.Context - client *Client projectID string buckets []*BucketAttrs pageInfo *iterator.PageInfo @@ -1237,29 +2103,63 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) { // Note: This method is not safe for concurrent operations without explicit synchronization. func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } -func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) { - req := it.client.raw.Buckets.List(it.projectID) - setClientHeader(req.Header()) - req.Projection("full") - req.Prefix(it.Prefix) - req.PageToken(pageToken) - if pageSize > 0 { - req.MaxResults(int64(pageSize)) - } - var resp *raw.Buckets - err = runWithRetry(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() - return err - }) - if err != nil { - return "", err +// RPO (Recovery Point Objective) configures the turbo replication feature. See +// https://cloud.google.com/storage/docs/managing-turbo-replication for more information. +type RPO int + +const ( + // RPOUnknown is a zero value. It may be returned from bucket.Attrs() if RPO + // is not present in the bucket metadata, that is, the bucket is not dual-region. + // This value is also used if the RPO field is not set in a call to GCS. + RPOUnknown RPO = iota + + // RPODefault represents default replication. 
It is used to reset RPO on an + // existing bucket that has this field set to RPOAsyncTurbo. Otherwise it + // is equivalent to RPOUnknown, and is always ignored. This value is valid + // for dual- or multi-region buckets. + RPODefault + + // RPOAsyncTurbo represents turbo replication and is used to enable Turbo + // Replication on a bucket. This value is only valid for dual-region buckets. + RPOAsyncTurbo + + rpoUnknown string = "" + rpoDefault = "DEFAULT" + rpoAsyncTurbo = "ASYNC_TURBO" +) + +func (rpo RPO) String() string { + switch rpo { + case RPODefault: + return rpoDefault + case RPOAsyncTurbo: + return rpoAsyncTurbo + default: + return rpoUnknown } - for _, item := range resp.Items { - b, err := newBucket(item) - if err != nil { - return "", err - } - it.buckets = append(it.buckets, b) +} + +// protoDateToUTCTime returns a new Time based on the google.type.Date, in UTC. +// +// Hours, minutes, seconds, and nanoseconds are set to 0. +func protoDateToUTCTime(d *dpb.Date) time.Time { + return protoDateToTime(d, time.UTC) +} + +// protoDateToTime returns a new Time based on the google.type.Date and provided +// *time.Location. +// +// Hours, minutes, seconds, and nanoseconds are set to 0. +func protoDateToTime(d *dpb.Date, l *time.Location) time.Time { + return time.Date(int(d.GetYear()), time.Month(d.GetMonth()), int(d.GetDay()), 0, 0, 0, 0, l) +} + +// timeToProtoDate returns a new google.type.Date based on the provided time.Time. +// The location is ignored, as is anything more precise than the day. +func timeToProtoDate(t time.Time) *dpb.Date { + return &dpb.Date{ + Year: int32(t.Year()), + Month: int32(t.Month()), + Day: int32(t.Day()), } - return resp.NextPageToken, nil } diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go new file mode 100644 index 0000000000000..d579a2b1ee78a --- /dev/null +++ b/vendor/cloud.google.com/go/storage/client.go @@ -0,0 +1,333 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "io" + "time" + + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/option" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +// TODO(noahdietz): Move existing factory methods to this file. + +// storageClient is an internal-only interface designed to separate the +// transport-specific logic of making Storage API calls from the logic of the +// client library. +// +// Implementation requirements beyond implementing the interface include: +// * factory method(s) must accept a `userProject string` param +// * `settings` must be retained per instance +// * `storageOption`s must be resolved in the order they are received +// * all API errors must be wrapped in the gax-go APIError type +// * any unimplemented interface methods must return a StorageUnimplementedErr +// +// TODO(noahdietz): This interface is currently not used in the production code +// paths +type storageClient interface { + + // Top-level methods. 
+ + GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) + CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) + ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator + Close() error + + // Bucket methods. + + DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error + GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) + UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) + LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error + ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator + + // Object metadata methods. + + DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error + GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) + UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) + + // Default Object ACL methods. + + DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error + ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) + UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error + + // Bucket ACL methods. + + DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error + ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) + UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error + + // Object ACL methods. + + DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error + ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) + UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error + + // Media operations. + + ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) + RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) + + NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (*Reader, error) + OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) + + // IAM methods. + + GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) + SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error + TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) + + // HMAC Key methods. 
+ + GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) + ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator + UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) + CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) + DeleteHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) error + + // Notification methods. + ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error) + CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error) + DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error +} + +// settings contains transport-agnostic configuration for API calls made via +// the storageClient inteface. All implementations must utilize settings +// and respect those that are applicable. +type settings struct { + // retry is the complete retry configuration to use when evaluating if an + // API call should be retried. + retry *retryConfig + + // gax is a set of gax.CallOption to be conveyed to gax.Invoke. + // Note: Not all storageClient interfaces will must use gax.Invoke. + gax []gax.CallOption + + // idempotent indicates if the call is idempotent or not when considering + // if the call should be retired or not. + idempotent bool + + // clientOption is a set of option.ClientOption to be used during client + // transport initialization. See https://pkg.go.dev/google.golang.org/api/option + // for a list of supported options. + clientOption []option.ClientOption + + // userProject is the user project that should be billed for the request. + userProject string +} + +func initSettings(opts ...storageOption) *settings { + s := &settings{} + resolveOptions(s, opts...) + return s +} + +func resolveOptions(s *settings, opts ...storageOption) { + for _, o := range opts { + o.Apply(s) + } +} + +// callSettings is a helper for resolving storage options against the settings +// in the context of an individual call. This is to ensure that client-level +// default settings are not mutated by two different calls getting options. +// +// Example: s := callSettings(c.settings, opts...) +func callSettings(defaults *settings, opts ...storageOption) *settings { + if defaults == nil { + return nil + } + // This does not make a deep copy of the pointer/slice fields, but all + // options replace the settings fields rather than modify their values in + // place. + cs := *defaults + resolveOptions(&cs, opts...) + return &cs +} + +// makeStorageOpts is a helper for generating a set of storageOption based on +// idempotency, retryConfig, and userProject. All top-level client operations +// will generally have to pass these options through the interface. +func makeStorageOpts(isIdempotent bool, retry *retryConfig, userProject string) []storageOption { + opts := []storageOption{idempotent(isIdempotent)} + if retry != nil { + opts = append(opts, withRetryConfig(retry)) + } + if userProject != "" { + opts = append(opts, withUserProject(userProject)) + } + return opts +} + +// storageOption is the transport-agnostic call option for the storageClient +// interface. 
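To make the option-resolution order concrete, here is a small self-contained sketch of the pattern that callSettings and makeStorageOpts implement: client-level defaults are copied for each call, and per-call options are applied in the order received, so the shared defaults are never mutated. The type names below are illustrative stand-ins, not the package's unexported types:

    package main

    import "fmt"

    // settings mirrors the per-call configuration resolved from options.
    type settings struct {
        idempotent  bool
        userProject string
    }

    // option mirrors storageOption: anything that can mutate settings.
    type option interface{ apply(*settings) }

    type idempotentOpt bool

    func (o idempotentOpt) apply(s *settings) { s.idempotent = bool(o) }

    type userProjectOpt string

    func (o userProjectOpt) apply(s *settings) { s.userProject = string(o) }

    // callSettings copies the defaults and applies options in order, so two
    // concurrent calls never see each other's per-call overrides.
    func callSettings(defaults *settings, opts ...option) *settings {
        cs := *defaults
        for _, o := range opts {
            o.apply(&cs)
        }
        return &cs
    }

    func main() {
        defaults := &settings{}
        s := callSettings(defaults, idempotentOpt(true), userProjectOpt("my-billing-project"))
        fmt.Println(s.idempotent, s.userProject) // true my-billing-project
        fmt.Println(defaults.userProject == "")  // true: the defaults were not mutated
    }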
+type storageOption interface { + Apply(s *settings) +} + +func withGAXOptions(opts ...gax.CallOption) storageOption { + return &gaxOption{opts} +} + +type gaxOption struct { + opts []gax.CallOption +} + +func (o *gaxOption) Apply(s *settings) { s.gax = o.opts } + +func withRetryConfig(rc *retryConfig) storageOption { + return &retryOption{rc} +} + +type retryOption struct { + rc *retryConfig +} + +func (o *retryOption) Apply(s *settings) { s.retry = o.rc } + +func idempotent(i bool) storageOption { + return &idempotentOption{i} +} + +type idempotentOption struct { + idempotency bool +} + +func (o *idempotentOption) Apply(s *settings) { s.idempotent = o.idempotency } + +func withClientOptions(opts ...option.ClientOption) storageOption { + return &clientOption{opts: opts} +} + +type clientOption struct { + opts []option.ClientOption +} + +func (o *clientOption) Apply(s *settings) { s.clientOption = o.opts } + +func withUserProject(project string) storageOption { + return &userProjectOption{project} +} + +type userProjectOption struct { + project string +} + +func (o *userProjectOption) Apply(s *settings) { s.userProject = o.project } + +type openWriterParams struct { + // Writer configuration + + // ctx is the context used by the writer routine to make all network calls + // and to manage the writer routine - see `Writer.ctx`. + // Required. + ctx context.Context + // chunkSize - see `Writer.ChunkSize`. + // Optional. + chunkSize int + // chunkRetryDeadline - see `Writer.ChunkRetryDeadline`. + // Optional. + chunkRetryDeadline time.Duration + + // Object/request properties + + // bucket - see `Writer.o.bucket`. + // Required. + bucket string + // attrs - see `Writer.ObjectAttrs`. + // Required. + attrs *ObjectAttrs + // conds - see `Writer.o.conds`. + // Optional. + conds *Conditions + // encryptionKey - see `Writer.o.encryptionKey` + // Optional. + encryptionKey []byte + // sendCRC32C - see `Writer.SendCRC32C`. + // Optional. + sendCRC32C bool + + // Writer callbacks + + // donec - see `Writer.donec`. + // Required. + donec chan struct{} + // setError callback for reporting errors - see `Writer.error`. + // Required. + setError func(error) + // progress callback for reporting upload progress - see `Writer.progress`. + // Required. + progress func(int64) + // setObj callback for reporting the resulting object - see `Writer.obj`. + // Required. + setObj func(*ObjectAttrs) +} + +type newRangeReaderParams struct { + bucket string + conds *Conditions + encryptionKey []byte + gen int64 + length int64 + object string + offset int64 + readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently. +} + +type composeObjectRequest struct { + dstBucket string + dstObject destinationObject + srcs []sourceObject + predefinedACL string + sendCRC32C bool +} + +type sourceObject struct { + name string + bucket string + gen int64 + conds *Conditions + encryptionKey []byte +} + +type destinationObject struct { + name string + bucket string + conds *Conditions + attrs *ObjectAttrs // attrs to set on the destination object. 
+ encryptionKey []byte + keyName string +} + +type rewriteObjectRequest struct { + srcObject sourceObject + dstObject destinationObject + predefinedACL string + token string + maxBytesRewrittenPerCall int64 +} + +type rewriteObjectResponse struct { + resource *ObjectAttrs + done bool + written int64 + size int64 + token string +} diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go index 61983df5adaed..a0b9a2683c7e8 100644 --- a/vendor/cloud.google.com/go/storage/copy.go +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -20,7 +20,6 @@ import ( "fmt" "cloud.google.com/go/internal/trace" - raw "google.golang.org/api/storage/v1" ) // CopierFrom creates a Copier that can copy src to dst. @@ -70,6 +69,15 @@ type Copier struct { DestinationKMSKeyName string dst, src *ObjectHandle + + // The maximum number of bytes that will be rewritten per rewrite request. + // Most callers shouldn't need to specify this parameter - it is primarily + // in place to support testing. If specified the value must be an integral + // multiple of 1 MiB (1048576). Also, this only applies to requests where + // the source and destination span locations and/or storage classes. Finally, + // this value must not change across rewrite calls else you'll get an error + // that the `rewriteToken` is invalid. + maxBytesRewrittenPerCall int64 } // Run performs the copy. @@ -86,66 +94,59 @@ func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil { return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key") } + if c.dst.gen != defaultGen { + return nil, fmt.Errorf("storage: generation cannot be specified on copy destination, got %v", c.dst.gen) + } // Convert destination attributes to raw form, omitting the bucket. // If the bucket is included but name or content-type aren't, the service // returns a 400 with "Required" as the only message. Omitting the bucket // does not cause any problems. - rawObject := c.ObjectAttrs.toRawObject("") + req := &rewriteObjectRequest{ + srcObject: sourceObject{ + name: c.src.object, + bucket: c.src.bucket, + gen: c.src.gen, + conds: c.src.conds, + encryptionKey: c.src.encryptionKey, + }, + dstObject: destinationObject{ + name: c.dst.object, + bucket: c.dst.bucket, + conds: c.dst.conds, + attrs: &c.ObjectAttrs, + encryptionKey: c.dst.encryptionKey, + keyName: c.DestinationKMSKeyName, + }, + predefinedACL: c.PredefinedACL, + token: c.RewriteToken, + maxBytesRewrittenPerCall: c.maxBytesRewrittenPerCall, + } + + isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) + var userProject string + if c.dst.userProject != "" { + userProject = c.dst.userProject + } else if c.src.userProject != "" { + userProject = c.src.userProject + } + opts := makeStorageOpts(isIdempotent, c.dst.retry, userProject) + for { - res, err := c.callRewrite(ctx, rawObject) + res, err := c.dst.c.tc.RewriteObject(ctx, req, opts...) if err != nil { return nil, err } + c.RewriteToken = res.token + req.token = res.token if c.ProgressFunc != nil { - c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize)) + c.ProgressFunc(uint64(res.written), uint64(res.size)) } - if res.Done { // Finished successfully. - return newObject(res.Resource), nil + if res.done { // Finished successfully. 
+ return res.resource, nil } } } -func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) { - call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj) - - call.Context(ctx).Projection("full") - if c.RewriteToken != "" { - call.RewriteToken(c.RewriteToken) - } - if c.DestinationKMSKeyName != "" { - call.DestinationKmsKeyName(c.DestinationKMSKeyName) - } - if c.PredefinedACL != "" { - call.DestinationPredefinedAcl(c.PredefinedACL) - } - if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { - return nil, err - } - if c.dst.userProject != "" { - call.UserProject(c.dst.userProject) - } else if c.src.userProject != "" { - call.UserProject(c.src.userProject) - } - if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil { - return nil, err - } - if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { - return nil, err - } - if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil { - return nil, err - } - var res *raw.RewriteResponse - var err error - setClientHeader(call.Header()) - err = runWithRetry(ctx, func() error { res, err = call.Do(); return err }) - if err != nil { - return nil, err - } - c.RewriteToken = res.RewriteToken - return res, nil -} - // ComposerFrom creates a Composer that can compose srcs into dst. // You can immediately call Run on the returned Composer, or you can // configure it first. @@ -185,17 +186,13 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { if err := c.dst.validate(); err != nil { return nil, err } + if c.dst.gen != defaultGen { + return nil, fmt.Errorf("storage: generation cannot be specified on compose destination, got %v", c.dst.gen) + } if len(c.srcs) == 0 { return nil, errors.New("storage: at least one source object must be specified") } - req := &raw.ComposeRequest{} - // Compose requires a non-empty Destination, so we always set it, - // even if the caller-provided ObjectAttrs is the zero value. 
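The rewrite loop above is now driven entirely by the transport client, but from a caller's point of view the public Copier API is unchanged. A minimal usage sketch (bucket and object names are placeholders; an existing client and ctx are assumed):

    src := client.Bucket("src-bucket").Object("source-object")
    dst := client.Bucket("dst-bucket").Object("destination-object")

    copier := dst.CopierFrom(src)
    copier.ProgressFunc = func(copiedBytes, totalBytes uint64) {
        log.Printf("copied %d of %d bytes", copiedBytes, totalBytes)
    }
    attrs, err := copier.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println("copied to generation", attrs.Generation)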
- req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) - if c.SendCRC32C { - req.Destination.Crc32c = encodeUint32(c.ObjectAttrs.CRC32C) - } for _, src := range c.srcs { if err := src.validate(); err != nil { return nil, err @@ -206,33 +203,31 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { if src.encryptionKey != nil { return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) } - srcObj := &raw.ComposeRequestSourceObjects{ - Name: src.object, - } - if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { - return nil, err - } - req.SourceObjects = append(req.SourceObjects, srcObj) } - call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx) - if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil { - return nil, err - } - if c.dst.userProject != "" { - call.UserProject(c.dst.userProject) + req := &composeObjectRequest{ + dstBucket: c.dst.bucket, + predefinedACL: c.PredefinedACL, + sendCRC32C: c.SendCRC32C, } - if c.PredefinedACL != "" { - call.DestinationPredefinedAcl(c.PredefinedACL) - } - if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { - return nil, err + req.dstObject = destinationObject{ + name: c.dst.object, + bucket: c.dst.bucket, + conds: c.dst.conds, + attrs: &c.ObjectAttrs, + encryptionKey: c.dst.encryptionKey, } - var obj *raw.Object - setClientHeader(call.Header()) - err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) - if err != nil { - return nil, err + for _, src := range c.srcs { + s := sourceObject{ + name: src.object, + bucket: src.bucket, + gen: src.gen, + conds: src.conds, + } + req.srcs = append(req.srcs, s) } - return newObject(obj), nil + + isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) + opts := makeStorageOpts(isIdempotent, c.dst.retry, c.dst.userProject) + return c.dst.c.tc.ComposeObject(ctx, req, opts...) } diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 614ea11a5904e..8bf3098431e70 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -19,156 +19,186 @@ Google Cloud Storage stores data in named objects, which are grouped into bucket More information about Google Cloud Storage is available at https://cloud.google.com/storage/docs. -See https://godoc.org/cloud.google.com/go for authentication, timeouts, +See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts, connection pooling and similar aspects of this package. -All of the methods of this package use exponential backoff to retry calls that fail -with certain errors, as described in -https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues -indefinitely unless the controlling context is canceled or the client is closed. See -context.WithTimeout and context.WithCancel. +# Creating a Client +To start working with this package, create a [Client]: -Creating a Client - -To start working with this package, create a client: - - ctx := context.Background() - client, err := storage.NewClient(ctx) - if err != nil { - // TODO: Handle error. - } + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } -The client will use your default application credentials. 
+The client will use your default application credentials. Clients should be +reused instead of created as needed. The methods of [Client] are safe for +concurrent use by multiple goroutines. If you only wish to access public data, you can create an unauthenticated client with - client, err := storage.NewClient(ctx, option.WithoutAuthentication()) + client, err := storage.NewClient(ctx, option.WithoutAuthentication()) + +To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST +environment variable to the address at which your emulator is running. This will +send requests to that address instead of to Cloud Storage. You can then create +and use a client as usual: + + // Set STORAGE_EMULATOR_HOST environment variable. + err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000") + if err != nil { + // TODO: Handle error. + } + + // Create client as usual. + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + // This request is now directed to http://localhost:9000/storage/v1/b + // instead of https://storage.googleapis.com/storage/v1/b + if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil { + // TODO: Handle error. + } + +Please note that there is no official emulator for Cloud Storage. -Buckets +# Buckets A Google Cloud Storage bucket is a collection of objects. To work with a bucket, make a bucket handle: - bkt := client.Bucket(bucketName) + bkt := client.Bucket(bucketName) A handle is a reference to a bucket. You can have a handle even if the bucket doesn't exist yet. To create a bucket in Google Cloud Storage, -call Create on the handle: +call [BucketHandle.Create]: - if err := bkt.Create(ctx, projectID, nil); err != nil { - // TODO: Handle error. - } + if err := bkt.Create(ctx, projectID, nil); err != nil { + // TODO: Handle error. + } Note that although buckets are associated with projects, bucket names are global across all projects. Each bucket has associated metadata, represented in this package by -BucketAttrs. The third argument to BucketHandle.Create allows you to set -the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use -Attrs: +[BucketAttrs]. The third argument to [BucketHandle.Create] allows you to set +the initial [BucketAttrs] of a bucket. To retrieve a bucket's attributes, use +[BucketHandle.Attrs]: - attrs, err := bkt.Attrs(ctx) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", - attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) + attrs, err := bkt.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", + attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) -Objects +# Objects An object holds arbitrary data as a sequence of bytes, like a file. You refer to objects using a handle, just as with buckets, but unlike buckets you don't explicitly create an object. Instead, the first time you write -to an object it will be created. You can use the standard Go io.Reader -and io.Writer interfaces to read and write object data: - - obj := bkt.Object("data") - // Write something to obj. - // w implements io.Writer. - w := obj.NewWriter(ctx) - // Write some text to obj. This will either create the object or overwrite whatever is there already. - if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { - // TODO: Handle error. - } - // Close, just like writing a file. 
- if err := w.Close(); err != nil { - // TODO: Handle error. - } - - // Read it back. - r, err := obj.NewReader(ctx) - if err != nil { - // TODO: Handle error. - } - defer r.Close() - if _, err := io.Copy(os.Stdout, r); err != nil { - // TODO: Handle error. - } - // Prints "This object contains text." - -Objects also have attributes, which you can fetch with Attrs: - - objAttrs, err := obj.Attrs(ctx) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("object %s has size %d and can be read using %s\n", - objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) - -Listing objects - -Listing objects in a bucket is done with the Bucket.Objects method: - - query := &storage.Query{Prefix: ""} - - var names []string - it := bkt.Objects(ctx, query) - for { - attrs, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - log.Fatal(err) - } - names = append(names, attrs.Name) - } +to an object it will be created. You can use the standard Go [io.Reader] +and [io.Writer] interfaces to read and write object data: + + obj := bkt.Object("data") + // Write something to obj. + // w implements io.Writer. + w := obj.NewWriter(ctx) + // Write some text to obj. This will either create the object or overwrite whatever is there already. + if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { + // TODO: Handle error. + } + // Close, just like writing a file. + if err := w.Close(); err != nil { + // TODO: Handle error. + } + + // Read it back. + r, err := obj.NewReader(ctx) + if err != nil { + // TODO: Handle error. + } + defer r.Close() + if _, err := io.Copy(os.Stdout, r); err != nil { + // TODO: Handle error. + } + // Prints "This object contains text." + +Objects also have attributes, which you can fetch with [ObjectHandle.Attrs]: + + objAttrs, err := obj.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("object %s has size %d and can be read using %s\n", + objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) + +# Listing objects + +Listing objects in a bucket is done with the [BucketHandle.Objects] method: + + query := &storage.Query{Prefix: ""} + + var names []string + it := bkt.Objects(ctx, query) + for { + attrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + log.Fatal(err) + } + names = append(names, attrs.Name) + } + +Objects are listed lexicographically by name. To filter objects +lexicographically, [Query.StartOffset] and/or [Query.EndOffset] can be used: + + query := &storage.Query{ + Prefix: "", + StartOffset: "bar/", // Only list objects lexicographically >= "bar/" + EndOffset: "foo/", // Only list objects lexicographically < "foo/" + } + + // ... as before If only a subset of object attributes is needed when listing, specifying this -subset using Query.SetAttrSelection may speed up the listing process: +subset using [Query.SetAttrSelection] may speed up the listing process: - query := &storage.Query{Prefix: ""} - query.SetAttrSelection([]string{"Name"}) + query := &storage.Query{Prefix: ""} + query.SetAttrSelection([]string{"Name"}) - // ... as before + // ... as before -ACLs +# ACLs Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of ACLRules, each of which specifies the role of a user, group or project. ACLs are suitable for fine-grained control, but you may prefer using IAM to control -access at the project level (see -https://cloud.google.com/storage/docs/access-control/iam). +access at the project level (see [Cloud Storage IAM docs]. 
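One listing pattern the section above does not spell out: setting Query.Delimiter groups results by a pseudo-directory. Synthetic prefix entries come back as ObjectAttrs values with only the Prefix field set, which is how callers distinguish them from real objects. A sketch, assuming bkt and ctx from the earlier examples:

    it := bkt.Objects(ctx, &storage.Query{Prefix: "logs/", Delimiter: "/"})
    for {
        attrs, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        if attrs.Prefix != "" {
            fmt.Println("prefix:", attrs.Prefix) // e.g. "logs/2023-01-01/"
            continue
        }
        fmt.Println("object:", attrs.Name)
    }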
-To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method: +To list the ACLs of a bucket or object, obtain an [ACLHandle] and call [ACLHandle.List]: - acls, err := obj.ACL().List(ctx) - if err != nil { - // TODO: Handle error. - } - for _, rule := range acls { - fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) - } + acls, err := obj.ACL().List(ctx) + if err != nil { + // TODO: Handle error. + } + for _, rule := range acls { + fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) + } You can also set and delete ACLs. -Conditions +# Conditions Every object has a generation and a metageneration. The generation changes whenever the content changes, and the metageneration changes whenever the -metadata changes. Conditions let you check these values before an operation; +metadata changes. [Conditions] let you check these values before an operation; the operation only executes if the conditions match. You can use conditions to prevent race conditions in read-modify-write operations. @@ -176,43 +206,123 @@ For example, say you've read an object's metadata into objAttrs. Now you want to write to that object, but only if its contents haven't changed since you read it. Here is how to express that: - w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) - // Proceed with writing as above. + w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) + // Proceed with writing as above. -Signed URLs +# Signed URLs You can obtain a URL that lets anyone read or write an object for a limited time. -You don't need to create a client to do this. See the documentation of -SignedURL for details. +Signing a URL requires credentials authorized to sign a URL. To use the same +authentication that was used when instantiating the Storage client, use +[BucketHandle.SignedURL]. - url, err := storage.SignedURL(bucketName, "shared-object", opts) - if err != nil { - // TODO: Handle error. - } - fmt.Println(url) + url, err := client.Bucket(bucketName).SignedURL(objectName, opts) + if err != nil { + // TODO: Handle error. + } + fmt.Println(url) -Post Policy V4 Signed Request +You can also sign a URL without creating a client. See the documentation of +[SignedURL] for details. + + url, err := storage.SignedURL(bucketName, "shared-object", opts) + if err != nil { + // TODO: Handle error. + } + fmt.Println(url) + +# Post Policy V4 Signed Request A type of signed request that allows uploads through HTML forms directly to Cloud Storage with temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised by a user. -For more information, please see https://cloud.google.com/storage/docs/xml-api/post-object as well -as the documentation of GenerateSignedPostPolicyV4. - - pv4, err := storage.GenerateSignedPostPolicyV4(bucketName, objectName, opts) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields) - -Errors - -Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error). -These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example: +For more information, please see the [XML POST Object docs] as well +as the documentation of [BucketHandle.GenerateSignedPostPolicyV4]. 
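The signed-URL examples above pass an opts value without constructing it. A minimal sketch of those options, assuming an existing client, bucketName, and objectName; Method and Expires must always be set, while signing credentials are detected as described in the credential-requirements section below:

    opts := &storage.SignedURLOptions{
        Method:  "GET",
        Expires: time.Now().Add(15 * time.Minute),
    }
    url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(url)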
- if e, ok := err.(*googleapi.Error); ok { + pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields) + +# Credential requirements for signing + +If the GoogleAccessID and PrivateKey option fields are not provided, they will +be automatically detected by [BucketHandle.SignedURL] and +[BucketHandle.GenerateSignedPostPolicyV4] if any of the following are true: + - you are authenticated to the Storage Client with a service account's + downloaded private key, either directly in code or by setting the + GOOGLE_APPLICATION_CREDENTIALS environment variable (see [Other Environments]), + - your application is running on Google Compute Engine (GCE), or + - you are logged into [gcloud using application default credentials] + with [impersonation enabled]. + +Detecting GoogleAccessID may not be possible if you are authenticated using a +token source or using [option.WithHTTPClient]. In this case, you can provide a +service account email for GoogleAccessID and the client will attempt to sign +the URL or Post Policy using that service account. + +To generate the signature, you must have: + - iam.serviceAccounts.signBlob permissions on the GoogleAccessID service + account, and + - the [IAM Service Account Credentials API] enabled (unless authenticating + with a downloaded private key). + +# Errors + +Errors returned by this client are often of the type [googleapi.Error]. +These errors can be introspected for more information by using [errors.As] +with the richer [googleapi.Error] type. For example: + + var e *googleapi.Error + if ok := errors.As(err, &e); ok { if e.Code == 409 { ... } } + +# Retrying failed requests + +Methods in this package may retry calls that fail with transient errors. +Retrying continues indefinitely unless the controlling context is canceled, the +client is closed, or a non-transient error is received. To stop retries from +continuing, use context timeouts or cancellation. + +The retry strategy in this library follows best practices for Cloud Storage. By +default, operations are retried only if they are idempotent, and exponential +backoff with jitter is employed. In addition, errors are only retried if they +are defined as transient by the service. See the [Cloud Storage retry docs] +for more information. + +Users can configure non-default retry behavior for a single library call (using +[BucketHandle.Retryer] and [ObjectHandle.Retryer]) or for all calls made by a +client (using [Client.SetRetry]). For example: + + o := client.Bucket(bucket).Object(object).Retryer( + // Use WithBackoff to change the timing of the exponential backoff. + storage.WithBackoff(gax.Backoff{ + Initial: 2 * time.Second, + }), + // Use WithPolicy to configure the idempotency policy. RetryAlways will + // retry the operation even if it is non-idempotent. + storage.WithPolicy(storage.RetryAlways), + ) + + // Use a context timeout to set an overall deadline on the call, including all + // potential retries. + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + // Delete an object using the specified strategy and timeout. + if err := o.Delete(ctx); err != nil { + // Handle err. 
+ } + +[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam +[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object +[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy +[Other Environments]: https://cloud.google.com/storage/docs/authentication#libauth +[gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login +[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account +[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview */ package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/emulator_test.sh b/vendor/cloud.google.com/go/storage/emulator_test.sh new file mode 100644 index 0000000000000..7bad7cf391ccf --- /dev/null +++ b/vendor/cloud.google.com/go/storage/emulator_test.sh @@ -0,0 +1,92 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.. + +# Fail on any error +set -eo pipefail + +# Display commands being run +set -x + +# Only run on Go 1.17+ +min_minor_ver=17 + +v=`go version | { read _ _ v _; echo ${v#go}; }` +comps=(${v//./ }) +minor_ver=${comps[1]} + +if [ "$minor_ver" -lt "$min_minor_ver" ]; then + echo minor version $minor_ver, skipping + exit 0 +fi + +export STORAGE_EMULATOR_HOST="http://localhost:9000" +export STORAGE_EMULATOR_HOST_GRPC="localhost:8888" + +DEFAULT_IMAGE_NAME='gcr.io/cloud-devrel-public-resources/storage-testbench' +DEFAULT_IMAGE_TAG='latest' +DOCKER_IMAGE=${DEFAULT_IMAGE_NAME}:${DEFAULT_IMAGE_TAG} +CONTAINER_NAME=storage_testbench + +# Note: --net=host makes the container bind directly to the Docker host’s network, +# with no network isolation. If we were to use port-mapping instead, reset connection errors +# would be captured differently and cause unexpected test behaviour. +# The host networking driver works only on Linux hosts. +# See more about using host networking: https://docs.docker.com/network/host/ +DOCKER_NETWORK="--net=host" +# Note: We do not expect the RetryConformanceTest suite to pass on darwin due to +# differences in the network errors emitted by the system. 
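As a companion to the per-handle Retryer example in doc.go above, retries can also be configured once for every call a client makes via Client.SetRetry. A short sketch (the backoff values are arbitrary, and an existing ctx is assumed):

    client, err := storage.NewClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    client.SetRetry(
        storage.WithBackoff(gax.Backoff{
            Initial:    2 * time.Second,
            Max:        30 * time.Second,
            Multiplier: 2,
        }),
        // Retry all operations, including non-idempotent ones.
        storage.WithPolicy(storage.RetryAlways),
    )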
+if [ `go env GOOS` == 'darwin' ]; then + DOCKER_NETWORK="-p 9000:9000 -p 8888:8888" +fi + +# Get the docker image for the testbench +docker pull $DOCKER_IMAGE + +# Start the testbench + +docker run --name $CONTAINER_NAME --rm -d $DOCKER_NETWORK $DOCKER_IMAGE +echo "Running the Cloud Storage testbench: $STORAGE_EMULATOR_HOST" +sleep 1 + +# Stop the testbench & cleanup environment variables +function cleanup() { + echo "Cleanup testbench" + docker stop $CONTAINER_NAME + unset STORAGE_EMULATOR_HOST; + unset STORAGE_EMULATOR_HOST_GRPC; +} +trap cleanup EXIT + +# Check that the server is running - retry several times to allow for start-up time +response=$(curl -w "%{http_code}\n" $STORAGE_EMULATOR_HOST --retry-connrefused --retry 5 -o /dev/null) + +if [[ $response != 200 ]] +then + echo "Testbench server did not start correctly" + exit 1 +fi + +# Start the gRPC server on port 8888. +echo "Starting the gRPC server on port 8888" +response=$(curl -w "%{http_code}\n" --retry 5 --retry-max-time 40 -o /dev/null "$STORAGE_EMULATOR_HOST/start_grpc?port=8888") + +if [[ $response != 200 ]] +then + echo "Testbench gRPC server did not start correctly" + exit 1 +fi + +# Run tests +go test -v -timeout 10m ./ -run="^Test(RetryConformance|.*Emulated)$" -short 2>&1 | tee -a sponge_log.log diff --git a/vendor/cloud.google.com/go/storage/go110.go b/vendor/cloud.google.com/go/storage/go110.go deleted file mode 100644 index c1273d59ade6f..0000000000000 --- a/vendor/cloud.google.com/go/storage/go110.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.10 - -package storage - -import ( - "net/url" - "strings" - - "google.golang.org/api/googleapi" -) - -func shouldRetry(err error) bool { - switch e := err.(type) { - case *googleapi.Error: - // Retry on 429 and 5xx, according to - // https://cloud.google.com/storage/docs/exponential-backoff. - return e.Code == 429 || (e.Code >= 500 && e.Code < 600) - case *url.Error: - // Retry socket-level errors ECONNREFUSED and ENETUNREACH (from syscall). - // Unfortunately the error type is unexported, so we resort to string - // matching. - retriable := []string{"connection refused", "connection reset"} - for _, s := range retriable { - if strings.Contains(e.Error(), s) { - return true - } - } - return false - case interface{ Temporary() bool }: - return e.Temporary() - default: - return false - } -} diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go new file mode 100644 index 0000000000000..1dfb6f8302be3 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -0,0 +1,1743 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "encoding/base64" + "fmt" + "io" + "net/url" + "os" + + "cloud.google.com/go/internal/trace" + gapic "cloud.google.com/go/storage/internal/apiv2" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + iampb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // defaultConnPoolSize is the default number of connections + // to initialize in the GAPIC gRPC connection pool. A larger + // connection pool may be necessary for jobs that require + // high throughput and/or leverage many concurrent streams. + // + // This is only used for the gRPC client. + defaultConnPoolSize = 4 + + // maxPerMessageWriteSize is the maximum amount of content that can be sent + // per WriteObjectRequest message. A buffer reaching this amount will + // precipitate a flush of the buffer. It is only used by the gRPC Writer + // implementation. + maxPerMessageWriteSize int = int(storagepb.ServiceConstants_MAX_WRITE_CHUNK_BYTES) + + // globalProjectAlias is the project ID alias used for global buckets. + // + // This is only used for the gRPC API. + globalProjectAlias = "_" + + // msgEntityNotSupported indicates ACL entites using project ID are not currently supported. + // + // This is only used for the gRPC API. + msgEntityNotSupported = "The gRPC API currently does not support ACL entities using project ID, use project numbers instead" +) + +// defaultGRPCOptions returns a set of the default client options +// for gRPC client initialization. +func defaultGRPCOptions() []option.ClientOption { + defaults := []option.ClientOption{ + option.WithGRPCConnectionPool(defaultConnPoolSize), + } + + // Set emulator options for gRPC if an emulator was specified. Note that in a + // hybrid client, STORAGE_EMULATOR_HOST will set the host to use for HTTP and + // STORAGE_EMULATOR_HOST_GRPC will set the host to use for gRPC (when using a + // local emulator, HTTP and gRPC must use different ports, so this is + // necessary). + // + // TODO: When the newHybridClient is not longer used, remove + // STORAGE_EMULATOR_HOST_GRPC and use STORAGE_EMULATOR_HOST for both the + // HTTP and gRPC based clients. + if host := os.Getenv("STORAGE_EMULATOR_HOST_GRPC"); host != "" { + // Strip the scheme from the emulator host. WithEndpoint does not take a + // scheme for gRPC. + host = stripScheme(host) + + defaults = append(defaults, + option.WithEndpoint(host), + option.WithGRPCDialOption(grpc.WithInsecure()), + option.WithoutAuthentication(), + ) + } else { + // Only enable DirectPath when the emulator is not being targeted. 
+ defaults = append(defaults, internaloption.EnableDirectPath(true)) + } + + return defaults +} + +// grpcStorageClient is the gRPC API implementation of the transport-agnostic +// storageClient interface. +type grpcStorageClient struct { + raw *gapic.Client + settings *settings +} + +// newGRPCStorageClient initializes a new storageClient that uses the gRPC +// Storage API. +func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { + s := initSettings(opts...) + s.clientOption = append(defaultGRPCOptions(), s.clientOption...) + + g, err := gapic.NewClient(ctx, s.clientOption...) + if err != nil { + return nil, err + } + + return &grpcStorageClient{ + raw: g, + settings: s, + }, nil +} + +func (c *grpcStorageClient) Close() error { + return c.raw.Close() +} + +// Top-level methods. + +func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetServiceAccountRequest{ + Project: toProjectResource(project), + } + var resp *storagepb.ServiceAccount + err := run(ctx, func() error { + var err error + resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return "", err + } + return resp.EmailAddress, err +} + +func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) + b := attrs.toProtoBucket() + b.Name = bucket + // If there is lifecycle information but no location, explicitly set + // the location. This is a GCS quirk/bug. + if b.GetLocation() == "" && b.GetLifecycle() != nil { + b.Location = "US" + } + + req := &storagepb.CreateBucketRequest{ + Parent: toProjectResource(project), + Bucket: b, + BucketId: b.GetName(), + } + if attrs != nil { + req.PredefinedAcl = attrs.PredefinedACL + req.PredefinedDefaultObjectAcl = attrs.PredefinedDefaultObjectACL + } + + var battrs *BucketAttrs + err := run(ctx, func() error { + res, err := c.raw.CreateBucket(ctx, req, s.gax...) + + battrs = newBucketFromProto(res) + + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + + return battrs, err +} + +func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator { + s := callSettings(c.settings, opts...) + it := &BucketIterator{ + ctx: ctx, + projectID: project, + } + + var gitr *gapic.BucketIterator + fetch := func(pageSize int, pageToken string) (token string, err error) { + // Initialize GAPIC-based iterator when pageToken is empty, which + // indicates that this fetch call is attempting to get the first page. + // + // Note: Initializing the GAPIC-based iterator lazily is necessary to + // capture the BucketIterator.Prefix set by the user *after* the + // BucketIterator is returned to them from the veneer. + if pageToken == "" { + req := &storagepb.ListBucketsRequest{ + Parent: toProjectResource(it.projectID), + Prefix: it.Prefix, + } + gitr = c.raw.ListBuckets(it.ctx, req, s.gax...) 
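The lazy initialization in ListBuckets above exists so that a Prefix assigned after Client.Buckets returns is still honored on the first fetch. From the caller's side that looks like the following sketch (an existing client, ctx, and projectID are assumed):

    it := client.Buckets(ctx, projectID)
    it.Prefix = "prod-" // Takes effect because the first page is fetched lazily.
    for {
        battrs, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(battrs.Name)
    }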
+ } + + var buckets []*storagepb.Bucket + var next string + err = run(it.ctx, func() error { + buckets, next, err = gitr.InternalFetch(pageSize, pageToken) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return "", err + } + + for _, bkt := range buckets { + b := newBucketFromProto(bkt) + it.buckets = append(it.buckets, b) + } + + return next, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + + return it +} + +// Bucket methods. + +func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteBucketRequest{ + Name: bucketResourceName(globalProjectAlias, bucket), + } + if err := applyBucketCondsProto("grpcStorageClient.DeleteBucket", conds, req); err != nil { + return err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + return run(ctx, func() error { + return c.raw.DeleteBucket(ctx, req, s.gax...) + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) +} + +func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetBucketRequest{ + Name: bucketResourceName(globalProjectAlias, bucket), + ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, + } + if err := applyBucketCondsProto("grpcStorageClient.GetBucket", conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + var battrs *BucketAttrs + err := run(ctx, func() error { + res, err := c.raw.GetBucket(ctx, req, s.gax...) + + battrs = newBucketFromProto(res) + + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return nil, ErrBucketNotExist + } + + return battrs, err +} +func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) 
+ b := uattrs.toProtoBucket() + b.Name = bucketResourceName(globalProjectAlias, bucket) + req := &storagepb.UpdateBucketRequest{ + Bucket: b, + PredefinedAcl: uattrs.PredefinedACL, + PredefinedDefaultObjectAcl: uattrs.PredefinedDefaultObjectACL, + } + if err := applyBucketCondsProto("grpcStorageClient.UpdateBucket", conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + var paths []string + fieldMask := &fieldmaskpb.FieldMask{ + Paths: paths, + } + if uattrs.CORS != nil { + fieldMask.Paths = append(fieldMask.Paths, "cors") + } + if uattrs.DefaultEventBasedHold != nil { + fieldMask.Paths = append(fieldMask.Paths, "default_event_based_hold") + } + if uattrs.RetentionPolicy != nil { + fieldMask.Paths = append(fieldMask.Paths, "retention_policy") + } + if uattrs.VersioningEnabled != nil { + fieldMask.Paths = append(fieldMask.Paths, "versioning") + } + if uattrs.RequesterPays != nil { + fieldMask.Paths = append(fieldMask.Paths, "billing") + } + if uattrs.BucketPolicyOnly != nil || uattrs.UniformBucketLevelAccess != nil || uattrs.PublicAccessPrevention != PublicAccessPreventionUnknown { + fieldMask.Paths = append(fieldMask.Paths, "iam_config") + } + if uattrs.Encryption != nil { + fieldMask.Paths = append(fieldMask.Paths, "encryption") + } + if uattrs.Lifecycle != nil { + fieldMask.Paths = append(fieldMask.Paths, "lifecycle") + } + if uattrs.Logging != nil { + fieldMask.Paths = append(fieldMask.Paths, "logging") + } + if uattrs.Website != nil { + fieldMask.Paths = append(fieldMask.Paths, "website") + } + if uattrs.PredefinedACL != "" { + // In cases where PredefinedACL is set, Acl is cleared. + fieldMask.Paths = append(fieldMask.Paths, "acl") + } + if uattrs.PredefinedDefaultObjectACL != "" { + // In cases where PredefinedDefaultObjectACL is set, DefaultObjectAcl is cleared. + fieldMask.Paths = append(fieldMask.Paths, "default_object_acl") + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + if uattrs.acl != nil { + // In cases where acl is set by UpdateBucketACL method. + fieldMask.Paths = append(fieldMask.Paths, "acl") + } + if uattrs.defaultObjectACL != nil { + // In cases where defaultObjectACL is set by UpdateBucketACL method. + fieldMask.Paths = append(fieldMask.Paths, "default_object_acl") + } + if uattrs.StorageClass != "" { + fieldMask.Paths = append(fieldMask.Paths, "storage_class") + } + if uattrs.RPO != RPOUnknown { + fieldMask.Paths = append(fieldMask.Paths, "rpo") + } + if uattrs.Autoclass != nil { + fieldMask.Paths = append(fieldMask.Paths, "autoclass") + } + // TODO(cathyo): Handle labels. Pending b/230510191. + req.UpdateMask = fieldMask + + var battrs *BucketAttrs + err := run(ctx, func() error { + res, err := c.raw.UpdateBucket(ctx, req, s.gax...) + battrs = newBucketFromProto(res) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + + return battrs, err +} +func (c *grpcStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := &storagepb.LockBucketRetentionPolicyRequest{ + Bucket: bucketResourceName(globalProjectAlias, bucket), + } + if err := applyBucketCondsProto("grpcStorageClient.LockBucketRetentionPolicy", conds, req); err != nil { + return err + } + + return run(ctx, func() error { + _, err := c.raw.LockBucketRetentionPolicy(ctx, req, s.gax...) 
+ return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + +} +func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { + s := callSettings(c.settings, opts...) + it := &ObjectIterator{ + ctx: ctx, + } + if q != nil { + it.query = *q + } + req := &storagepb.ListObjectsRequest{ + Parent: bucketResourceName(globalProjectAlias, bucket), + Prefix: it.query.Prefix, + Delimiter: it.query.Delimiter, + Versions: it.query.Versions, + LexicographicStart: it.query.StartOffset, + LexicographicEnd: it.query.EndOffset, + IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter, + ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + gitr := c.raw.ListObjects(it.ctx, req, s.gax...) + fetch := func(pageSize int, pageToken string) (token string, err error) { + var objects []*storagepb.Object + err = run(it.ctx, func() error { + objects, token, err = gitr.InternalFetch(pageSize, pageToken) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound { + err = ErrBucketNotExist + } + return "", err + } + + for _, obj := range objects { + b := newObjectFromProto(obj) + it.items = append(it.items, b) + } + + // Response is always non-nil after a successful request. + res := gitr.Response.(*storagepb.ListObjectsResponse) + for _, prefix := range res.GetPrefixes() { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + + return token, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + + return it +} + +// Object metadata methods. + +func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteObjectRequest{ + Bucket: bucketResourceName(globalProjectAlias, bucket), + Object: object, + } + if err := applyCondsProto("grpcStorageClient.DeleteObject", gen, conds, req); err != nil { + return err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + err := run(ctx, func() error { + return c.raw.DeleteObject(ctx, req, s.gax...) + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return ErrObjectNotExist + } + return err +} + +func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetObjectRequest{ + Bucket: bucketResourceName(globalProjectAlias, bucket), + Object: object, + // ProjectionFull by default. + ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, + } + if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + if encryptionKey != nil { + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey) + } + + var attrs *ObjectAttrs + err := run(ctx, func() error { + res, err := c.raw.GetObject(ctx, req, s.gax...) 
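Both DeleteObject and GetObject above translate a gRPC NotFound status into the package's ErrObjectNotExist sentinel, so callers can keep handling missing objects the same way regardless of transport. A sketch (bucket and object names are placeholders):

    _, err := client.Bucket("my-bucket").Object("no-such-object").Attrs(ctx)
    if errors.Is(err, storage.ErrObjectNotExist) {
        // The object is missing; create it, back off, or report as appropriate.
    }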
+ attrs = newObjectFromProto(res) + + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return nil, ErrObjectNotExist + } + + return attrs, err +} + +func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, bucket), object) + req := &storagepb.UpdateObjectRequest{ + Object: o, + PredefinedAcl: uattrs.PredefinedACL, + } + if err := applyCondsProto("grpcStorageClient.UpdateObject", gen, conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + if encryptionKey != nil { + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey) + } + + fieldMask := &fieldmaskpb.FieldMask{Paths: nil} + if uattrs.EventBasedHold != nil { + fieldMask.Paths = append(fieldMask.Paths, "event_based_hold") + } + if uattrs.TemporaryHold != nil { + fieldMask.Paths = append(fieldMask.Paths, "temporary_hold") + } + if uattrs.ContentType != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_type") + } + if uattrs.ContentLanguage != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_language") + } + if uattrs.ContentEncoding != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_encoding") + } + if uattrs.ContentDisposition != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_disposition") + } + if uattrs.CacheControl != nil { + fieldMask.Paths = append(fieldMask.Paths, "cache_control") + } + if !uattrs.CustomTime.IsZero() { + fieldMask.Paths = append(fieldMask.Paths, "custom_time") + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + if uattrs.ACL != nil || len(uattrs.PredefinedACL) > 0 { + fieldMask.Paths = append(fieldMask.Paths, "acl") + } + // TODO(cathyo): Handle metadata. Pending b/230510191. + + req.UpdateMask = fieldMask + + var attrs *ObjectAttrs + err := run(ctx, func() error { + res, err := c.raw.UpdateObject(ctx, req, s.gax...) + attrs = newObjectFromProto(res) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if e, ok := status.FromError(err); ok && e.Code() == codes.NotFound { + return nil, ErrObjectNotExist + } + + return attrs, err +} + +// Default Object ACL methods. + +func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve BucketAttrs. + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return err + } + // Delete the entity and copy other remaining ACL entities. + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + // Return error if entity is not found or a project ID is used. + invalidEntity := true + var acl []ACLRule + for _, a := range attrs.DefaultObjectACL { + if a.Entity != entity { + acl = append(acl, a) + } + if a.Entity == entity { + invalidEntity = false + } + } + if invalidEntity { + return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. 
%v", entity, bucket, attrs.DefaultObjectACL, msgEntityNotSupported) + } + uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl} + // Call UpdateBucket with a MetagenerationMatch precondition set. + if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err + } + return nil +} + +func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return nil, err + } + return attrs.DefaultObjectACL, nil +} + +func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve BucketAttrs. + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return err + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + var acl []ACLRule + aclRule := ACLRule{Entity: entity, Role: role} + acl = append(attrs.DefaultObjectACL, aclRule) + uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl} + // Call UpdateBucket with a MetagenerationMatch precondition set. + if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err + } + return nil +} + +// Bucket ACL methods. + +func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve BucketAttrs. + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return err + } + // Delete the entity and copy other remaining ACL entities. + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + // Return error if entity is not found or a project ID is used. + invalidEntity := true + var acl []ACLRule + for _, a := range attrs.ACL { + if a.Entity != entity { + acl = append(acl, a) + } + if a.Entity == entity { + invalidEntity = false + } + } + if invalidEntity { + return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported) + } + uattrs := &BucketAttrsToUpdate{acl: acl} + // Call UpdateBucket with a MetagenerationMatch precondition set. + if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err + } + return nil +} + +func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return nil, err + } + return attrs.ACL, nil +} + +func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve BucketAttrs. + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return err + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. 
+ var acl []ACLRule + aclRule := ACLRule{Entity: entity, Role: role} + acl = append(attrs.ACL, aclRule) + uattrs := &BucketAttrsToUpdate{acl: acl} + // Call UpdateBucket with a MetagenerationMatch precondition set. + if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err + } + return nil +} + +// Object ACL methods. + +func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve ObjectAttrs. + attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + if err != nil { + return err + } + // Delete the entity and copy other remaining ACL entities. + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + // Return error if entity is not found or a project ID is used. + invalidEntity := true + var acl []ACLRule + for _, a := range attrs.ACL { + if a.Entity != entity { + acl = append(acl, a) + } + if a.Entity == entity { + invalidEntity = false + } + } + if invalidEntity { + return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported) + } + uattrs := &ObjectAttrsToUpdate{ACL: acl} + // Call UpdateObject with the specified metageneration. + if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil { + return err + } + return nil +} + +// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. +// Selecting a specific generation of this object is not currently supported by the client. +func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { + o, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + if err != nil { + return nil, err + } + return o.ACL, nil +} + +func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve ObjectAttrs. + attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + if err != nil { + return err + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + var acl []ACLRule + aclRule := ACLRule{Entity: entity, Role: role} + acl = append(attrs.ACL, aclRule) + uattrs := &ObjectAttrsToUpdate{ACL: acl} + // Call UpdateObject with the specified metageneration. + if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil { + return err + } + return nil +} + +// Media operations. + +func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) 
+ if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + dstObjPb := req.dstObject.attrs.toProtoObject(req.dstBucket) + dstObjPb.Name = req.dstObject.name + if err := applyCondsProto("ComposeObject destination", defaultGen, req.dstObject.conds, dstObjPb); err != nil { + return nil, err + } + if req.sendCRC32C { + dstObjPb.Checksums.Crc32C = &req.dstObject.attrs.CRC32C + } + + srcs := []*storagepb.ComposeObjectRequest_SourceObject{} + for _, src := range req.srcs { + srcObjPb := &storagepb.ComposeObjectRequest_SourceObject{Name: src.name} + if err := applyCondsProto("ComposeObject source", src.gen, src.conds, srcObjPb); err != nil { + return nil, err + } + srcs = append(srcs, srcObjPb) + } + + rawReq := &storagepb.ComposeObjectRequest{ + Destination: dstObjPb, + SourceObjects: srcs, + } + if req.predefinedACL != "" { + rawReq.DestinationPredefinedAcl = req.predefinedACL + } + if req.dstObject.encryptionKey != nil { + rawReq.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey) + } + + var obj *storagepb.Object + var err error + if err := run(ctx, func() error { + obj, err = c.raw.ComposeObject(ctx, rawReq, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { + return nil, err + } + + return newObjectFromProto(obj), nil +} +func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) { + s := callSettings(c.settings, opts...) + obj := req.dstObject.attrs.toProtoObject("") + call := &storagepb.RewriteObjectRequest{ + SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket), + SourceObject: req.srcObject.name, + RewriteToken: req.token, + DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket), + DestinationName: req.dstObject.name, + Destination: obj, + DestinationKmsKey: req.dstObject.keyName, + DestinationPredefinedAcl: req.predefinedACL, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(req.dstObject.encryptionKey), + } + + // The userProject, whether source or destination project, is decided by the code calling the interface. 
+ if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + if err := applyCondsProto("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { + return nil, err + } + if err := applySourceCondsProto(req.srcObject.gen, req.srcObject.conds, call); err != nil { + return nil, err + } + + if len(req.dstObject.encryptionKey) > 0 { + call.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey) + } + if len(req.srcObject.encryptionKey) > 0 { + srcParams := toProtoCommonObjectRequestParams(req.srcObject.encryptionKey) + call.CopySourceEncryptionAlgorithm = srcParams.GetEncryptionAlgorithm() + call.CopySourceEncryptionKeyBytes = srcParams.GetEncryptionKeyBytes() + call.CopySourceEncryptionKeySha256Bytes = srcParams.GetEncryptionKeySha256Bytes() + } + + call.MaxBytesRewrittenPerCall = req.maxBytesRewrittenPerCall + + var res *storagepb.RewriteResponse + var err error + + retryCall := func() error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err } + + if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { + return nil, err + } + + r := &rewriteObjectResponse{ + done: res.GetDone(), + written: res.GetTotalBytesRewritten(), + size: res.GetObjectSize(), + token: res.GetRewriteToken(), + resource: newObjectFromProto(res.GetResource()), + } + + return r, nil +} + +func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + // A negative length means "read to the end of the object", but the + // read_limit field it corresponds to uses zero to mean the same thing. Thus + // we coerce the length to 0 to read to the end of the object. + if params.length < 0 { + params.length = 0 + } + + b := bucketResourceName(globalProjectAlias, params.bucket) + req := &storagepb.ReadObjectRequest{ + Bucket: b, + Object: params.object, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey), + } + // The default is a negative value, which means latest. + if params.gen >= 0 { + req.Generation = params.gen + } + + // Define a function that initiates a Read with offset and length, assuming + // we have already read seen bytes. + reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) { + // If the context has already expired, return immediately without making + // we call. + if err := ctx.Err(); err != nil { + return nil, nil, err + } + + cc, cancel := context.WithCancel(ctx) + + start := params.offset + seen + // Only set a ReadLimit if length is greater than zero, because zero + // means read it all. + if params.length > 0 { + req.ReadLimit = params.length - seen + } + req.ReadOffset = start + + if err := applyCondsProto("gRPCReader.reopen", params.gen, params.conds, req); err != nil { + cancel() + return nil, nil, err + } + + var stream storagepb.Storage_ReadObjectClient + var msg *storagepb.ReadObjectResponse + var err error + + err = run(cc, func() error { + stream, err = c.raw.ReadObject(cc, req, s.gax...) + if err != nil { + return err + } + + msg, err = stream.Recv() + // These types of errors show up on the Recv call, rather than the + // initialization of the stream via ReadObject above. 
+ if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return ErrObjectNotExist + } + + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + // Close the stream context we just created to ensure we don't leak + // resources. + cancel() + return nil, nil, err + } + + return &readStreamResponse{stream, msg}, cancel, nil + } + + res, cancel, err := reopen(0) + if err != nil { + return nil, err + } + + // The first message was Recv'd on stream open, use it to populate the + // object metadata. + msg := res.response + obj := msg.GetMetadata() + // This is the size of the entire object, even if only a range was requested. + size := obj.GetSize() + + r = &Reader{ + Attrs: ReaderObjectAttrs{ + Size: size, + ContentType: obj.GetContentType(), + ContentEncoding: obj.GetContentEncoding(), + CacheControl: obj.GetCacheControl(), + LastModified: obj.GetUpdateTime().AsTime(), + Metageneration: obj.GetMetageneration(), + Generation: obj.GetGeneration(), + }, + reader: &gRPCReader{ + stream: res.stream, + reopen: reopen, + cancel: cancel, + size: size, + // Store the content from the first Recv in the + // client buffer for reading later. + leftovers: msg.GetChecksummedData().GetContent(), + settings: s, + }, + } + + cr := msg.GetContentRange() + if cr != nil { + r.Attrs.StartOffset = cr.GetStart() + r.remain = cr.GetEnd() - cr.GetStart() + 1 + } else { + r.remain = size + } + + // Only support checksums when reading an entire object, not a range. + if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length == 0 { + r.wantCRC = checksums.GetCrc32C() + r.checkCRC = true + } + + return r, nil +} + +func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { + s := callSettings(c.settings, opts...) + + var offset int64 + errorf := params.setError + progress := params.progress + setObj := params.setObj + + pr, pw := io.Pipe() + gw := newGRPCWriter(c, params, pr) + gw.settings = s + if s.userProject != "" { + gw.ctx = setUserProjectMetadata(gw.ctx, s.userProject) + } + + // This function reads the data sent to the pipe and sends sets of messages + // on the gRPC client-stream as the buffer is filled. + go func() { + defer close(params.donec) + + // Loop until there is an error or the Object has been finalized. + for { + // Note: This blocks until either the buffer is full or EOF is read. + recvd, doneReading, err := gw.read() + if err != nil { + err = checkCanceled(err) + errorf(err) + pr.CloseWithError(err) + return + } + + // The chunk buffer is full, but there is no end in sight. This + // means that a resumable upload will need to be used to send + // multiple chunks, until we are done reading data. Start a + // resumable upload if it has not already been started. + // Otherwise, all data will be sent over a single gRPC stream. + if !doneReading && gw.upid == "" { + err = gw.startResumableUpload() + if err != nil { + err = checkCanceled(err) + errorf(err) + pr.CloseWithError(err) + return + } + } + + o, off, finalized, err := gw.uploadBuffer(recvd, offset, doneReading) + if err != nil { + err = checkCanceled(err) + errorf(err) + pr.CloseWithError(err) + return + } + // At this point, the current buffer has been uploaded. Capture the + // committed offset here in case the upload was not finalized and + // another chunk is to be uploaded. 
+ offset = off + progress(offset) + + // When we are done reading data and the chunk has been finalized, + // we are done. + if doneReading && finalized { + // Build Object from server's response. + setObj(newObjectFromProto(o)) + return + } + } + }() + + return pw, nil +} + +// IAM methods. + +func (c *grpcStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) { + // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. + s := callSettings(c.settings, opts...) + req := &iampb.GetIamPolicyRequest{ + Resource: bucketResourceName(globalProjectAlias, resource), + Options: &iampb.GetPolicyOptions{ + RequestedPolicyVersion: version, + }, + } + var rp *iampb.Policy + err := run(ctx, func() error { + var err error + rp, err = c.raw.GetIamPolicy(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + + return rp, err +} + +func (c *grpcStorageClient) SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error { + // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. + s := callSettings(c.settings, opts...) + + req := &iampb.SetIamPolicyRequest{ + Resource: bucketResourceName(globalProjectAlias, resource), + Policy: policy, + } + + return run(ctx, func() error { + _, err := c.raw.SetIamPolicy(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) +} + +func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { + // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. + s := callSettings(c.settings, opts...) + req := &iampb.TestIamPermissionsRequest{ + Resource: bucketResourceName(globalProjectAlias, resource), + Permissions: permissions, + } + var res *iampb.TestIamPermissionsResponse + err := run(ctx, func() error { + var err error + res, err = c.raw.TestIamPermissions(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// HMAC Key methods. + +func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetHmacKeyRequest{ + AccessId: accessID, + Project: toProjectResource(project), + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + var metadata *storagepb.HmacKeyMetadata + err := run(ctx, func() error { + var err error + metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + return toHMACKeyFromProto(metadata), nil +} + +func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { + s := callSettings(c.settings, opts...) + req := &storagepb.ListHmacKeysRequest{ + Project: toProjectResource(project), + ServiceAccountEmail: serviceAccountEmail, + ShowDeletedKeys: showDeletedKeys, + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + it := &HMACKeysIterator{ + ctx: ctx, + projectID: project, + retry: s.retry, + } + gitr := c.raw.ListHmacKeys(it.ctx, req, s.gax...) 
+ fetch := func(pageSize int, pageToken string) (token string, err error) { + var hmacKeys []*storagepb.HmacKeyMetadata + err = run(it.ctx, func() error { + hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return "", err + } + for _, hkmd := range hmacKeys { + hk := toHMACKeyFromProto(hkmd) + it.hmacKeys = append(it.hmacKeys, hk) + } + + return token, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.hmacKeys) - it.index }, + func() interface{} { + prev := it.hmacKeys + it.hmacKeys = it.hmacKeys[:0] + it.index = 0 + return prev + }) + return it +} + +func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + hk := &storagepb.HmacKeyMetadata{ + AccessId: accessID, + Project: toProjectResource(project), + ServiceAccountEmail: serviceAccountEmail, + State: string(attrs.State), + Etag: attrs.Etag, + } + var paths []string + fieldMask := &fieldmaskpb.FieldMask{ + Paths: paths, + } + if attrs.State != "" { + fieldMask.Paths = append(fieldMask.Paths, "state") + } + req := &storagepb.UpdateHmacKeyRequest{ + HmacKey: hk, + UpdateMask: fieldMask, + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + var metadata *storagepb.HmacKeyMetadata + err := run(ctx, func() error { + var err error + metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + return toHMACKeyFromProto(metadata), nil +} + +func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.CreateHmacKeyRequest{ + Project: toProjectResource(project), + ServiceAccountEmail: serviceAccountEmail, + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + var res *storagepb.CreateHmacKeyResponse + err := run(ctx, func() error { + var err error + res, err = c.raw.CreateHmacKey(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + key := toHMACKeyFromProto(res.Metadata) + key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes) + + return key, nil +} + +func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteHmacKeyRequest{ + AccessId: accessID, + Project: toProjectResource(project), + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + return run(ctx, func() error { + return c.raw.DeleteHmacKey(ctx, req, s.gax...) + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) +} + +// Notification methods. + +func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.ListNotifications") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) 
+ if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + req := &storagepb.ListNotificationsRequest{ + Parent: bucketResourceName(globalProjectAlias, bucket), + } + var notifications []*storagepb.Notification + err = run(ctx, func() error { + gitr := c.raw.ListNotifications(ctx, req, s.gax...) + for { + // PageSize is not set and fallbacks to the API default pageSize of 100. + items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken()) + if err != nil { + return err + } + notifications = append(notifications, items...) + // If there are no more results, nextPageToken is empty and err is nil. + if nextPageToken == "" { + return err + } + req.PageToken = nextPageToken + } + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + + return notificationsToMapFromProto(notifications), nil +} + +func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.CreateNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + req := &storagepb.CreateNotificationRequest{ + Parent: bucketResourceName(globalProjectAlias, bucket), + Notification: toProtoNotification(n), + } + var pbn *storagepb.Notification + err = run(ctx, func() error { + var err error + pbn, err = c.raw.CreateNotification(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + return toNotificationFromProto(pbn), err +} + +func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteNotificationRequest{Name: id} + return run(ctx, func() error { + return c.raw.DeleteNotification(ctx, req, s.gax...) + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) +} + +// setUserProjectMetadata appends a project ID to the outgoing Context metadata +// via the x-goog-user-project system parameter defined at +// https://cloud.google.com/apis/docs/system-parameters. This is only for +// billing purposes, and is generally optional, except for requester-pays +// buckets. +func setUserProjectMetadata(ctx context.Context, project string) context.Context { + return metadata.AppendToOutgoingContext(ctx, "x-goog-user-project", project) +} + +type readStreamResponse struct { + stream storagepb.Storage_ReadObjectClient + response *storagepb.ReadObjectResponse +} + +type gRPCReader struct { + seen, size int64 + stream storagepb.Storage_ReadObjectClient + reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) + leftovers []byte + cancel context.CancelFunc + settings *settings +} + +// Read reads bytes into the user's buffer from an open gRPC stream. +func (r *gRPCReader) Read(p []byte) (int, error) { + // No stream to read from, either never initiliazed or Close was called. + // Note: There is a potential concurrency issue if multiple routines are + // using the same reader. One encounters an error and the stream is closed + // and then reopened while the other routine attempts to read from it. 
+ if r.stream == nil { + return 0, fmt.Errorf("reader has been closed") + } + + // The entire object has been read by this reader, return EOF. + if r.size != 0 && r.size == r.seen { + return 0, io.EOF + } + + var n int + // Read leftovers and return what was available to conform to the Reader + // interface: https://pkg.go.dev/io#Reader. + if len(r.leftovers) > 0 { + n = copy(p, r.leftovers) + r.seen += int64(n) + r.leftovers = r.leftovers[n:] + return n, nil + } + + // Attempt to Recv the next message on the stream. + msg, err := r.recv() + if err != nil { + return 0, err + } + + // TODO: Determine if we need to capture incremental CRC32C for this + // chunk. The Object CRC32C checksum is captured when directed to read + // the entire Object. If directed to read a range, we may need to + // calculate the range's checksum for verification if the checksum is + // present in the response here. + // TODO: Figure out if we need to support decompressive transcoding + // https://cloud.google.com/storage/docs/transcoding. + content := msg.GetChecksummedData().GetContent() + n = copy(p[n:], content) + leftover := len(content) - n + if leftover > 0 { + // Wasn't able to copy all of the data in the message, store for + // future Read calls. + r.leftovers = content[n:] + } + r.seen += int64(n) + + return n, nil +} + +// Close cancels the read stream's context in order for it to be closed and +// collected. +func (r *gRPCReader) Close() error { + if r.cancel != nil { + r.cancel() + } + r.stream = nil + return nil +} + +// recv attempts to Recv the next message on the stream. In the event +// that a retryable error is encountered, the stream will be closed, reopened, +// and Recv again. This will attempt to Recv until one of the following is true: +// +// * Recv is successful +// * A non-retryable error is encountered +// * The Reader's context is canceled +// +// The last error received is the one that is returned, which could be from +// an attempt to reopen the stream. +func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) { + msg, err := r.stream.Recv() + var shouldRetry = ShouldRetry + if r.settings.retry != nil && r.settings.retry.shouldRetry != nil { + shouldRetry = r.settings.retry.shouldRetry + } + if err != nil && shouldRetry(err) { + // This will "close" the existing stream and immediately attempt to + // reopen the stream, but will backoff if further attempts are necessary. + // Reopening the stream Recvs the first message, so if retrying is + // successful, the next logical chunk will be returned. + msg, err = r.reopenStream() + } + + return msg, err +} + +// reopenStream "closes" the existing stream and attempts to reopen a stream and +// sets the Reader's stream and cancelStream properties in the process. +func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) { + // Close existing stream and initialize new stream with updated offset. + r.Close() + + res, cancel, err := r.reopen(r.seen) + if err != nil { + return nil, err + } + r.stream = res.stream + r.cancel = cancel + return res.response, nil +} + +func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter { + size := params.chunkSize + if params.chunkSize == 0 { + // TODO: Should we actually use the minimum of 256 KB here when the user + // indicates they want minimal memory usage? We cannot do a zero-copy, + // bufferless upload like HTTP/JSON can. 
+ // TODO: We need to determine if we can avoid starting a + // resumable upload when the user *plans* to send more than bufSize but + // with a bufferless upload. + size = maxPerMessageWriteSize + } + + return &gRPCWriter{ + buf: make([]byte, size), + c: c, + ctx: params.ctx, + reader: r, + bucket: params.bucket, + attrs: params.attrs, + conds: params.conds, + encryptionKey: params.encryptionKey, + sendCRC32C: params.sendCRC32C, + } +} + +// gRPCWriter is a wrapper around the the gRPC client-stream API that manages +// sending chunks of data provided by the user over the stream. +type gRPCWriter struct { + c *grpcStorageClient + buf []byte + reader io.Reader + + ctx context.Context + + bucket string + attrs *ObjectAttrs + conds *Conditions + encryptionKey []byte + settings *settings + + sendCRC32C bool + + // The gRPC client-stream used for sending buffers. + stream storagepb.Storage_WriteObjectClient + + // The Resumable Upload ID started by a gRPC-based Writer. + upid string +} + +// startResumableUpload initializes a Resumable Upload with gRPC and sets the +// upload ID on the Writer. +func (w *gRPCWriter) startResumableUpload() error { + spec, err := w.writeObjectSpec() + if err != nil { + return err + } + req := &storagepb.StartResumableWriteRequest{ + WriteObjectSpec: spec, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey), + } + // TODO: Currently the checksums are only sent on the request to initialize + // the upload, but in the future, we must also support sending it + // on the *last* message of the stream. + req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) + return run(w.ctx, func() error { + upres, err := w.c.raw.StartResumableWrite(w.ctx, req) + w.upid = upres.GetUploadId() + return err + }, w.settings.retry, w.settings.idempotent, setRetryHeaderGRPC(w.ctx)) +} + +// queryProgress is a helper that queries the status of the resumable upload +// associated with the given upload ID. +func (w *gRPCWriter) queryProgress() (int64, error) { + var persistedSize int64 + err := run(w.ctx, func() error { + q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{ + UploadId: w.upid, + }) + persistedSize = q.GetPersistedSize() + return err + }, w.settings.retry, true, setRetryHeaderGRPC(w.ctx)) + + // q.GetCommittedSize() will return 0 if q is nil. + return persistedSize, err +} + +// uploadBuffer opens a Write stream and uploads the buffer at the given offset (if +// uploading a chunk for a resumable uploadBuffer), and will mark the write as +// finished if we are done receiving data from the user. The resulting write +// offset after uploading the buffer is returned, as well as a boolean +// indicating if the Object has been finalized. If it has been finalized, the +// final Object will be returned as well. Finalizing the upload is primarily +// important for Resumable Uploads. A simple or multi-part upload will always +// be finalized once the entire buffer has been written. +func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*storagepb.Object, int64, bool, error) { + var err error + var finishWrite bool + var sent, limit int = 0, maxPerMessageWriteSize + var shouldRetry = ShouldRetry + if w.settings.retry != nil && w.settings.retry.shouldRetry != nil { + shouldRetry = w.settings.retry.shouldRetry + } + offset := start + toWrite := w.buf[:recvd] + for { + first := sent == 0 + // This indicates that this is the last message and the remaining + // data fits in one message. 
+ belowLimit := recvd-sent <= limit + if belowLimit { + limit = recvd - sent + } + if belowLimit && doneReading { + finishWrite = true + } + + // Prepare chunk section for upload. + data := toWrite[sent : sent+limit] + req := &storagepb.WriteObjectRequest{ + Data: &storagepb.WriteObjectRequest_ChecksummedData{ + ChecksummedData: &storagepb.ChecksummedData{ + Content: data, + }, + }, + WriteOffset: offset, + FinishWrite: finishWrite, + } + + // Open a new stream and set the first_message field on the request. + // The first message on the WriteObject stream must either be the + // Object or the Resumable Upload ID. + if first { + ctx := gapic.InsertMetadata(w.ctx, metadata.Pairs("x-goog-request-params", "bucket="+url.QueryEscape(w.bucket))) + w.stream, err = w.c.raw.WriteObject(ctx) + if err != nil { + return nil, 0, false, err + } + + if w.upid != "" { + req.FirstMessage = &storagepb.WriteObjectRequest_UploadId{UploadId: w.upid} + } else { + spec, err := w.writeObjectSpec() + if err != nil { + return nil, 0, false, err + } + req.FirstMessage = &storagepb.WriteObjectRequest_WriteObjectSpec{ + WriteObjectSpec: spec, + } + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(w.encryptionKey) + // For a non-resumable upload, checksums must be sent in this message. + // TODO: Currently the checksums are only sent on the first message + // of the stream, but in the future, we must also support sending it + // on the *last* message of the stream (instead of the first). + req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) + } + + } + + err = w.stream.Send(req) + if err == io.EOF { + // err was io.EOF. The client-side of a stream only gets an EOF on Send + // when the backend closes the stream and wants to return an error + // status. Closing the stream receives the status as an error. + _, err = w.stream.CloseAndRecv() + + // Retriable errors mean we should start over and attempt to + // resend the entire buffer via a new stream. + // If not retriable, falling through will return the error received + // from closing the stream. + if shouldRetry(err) { + sent = 0 + finishWrite = false + // TODO: Add test case for failure modes of querying progress. + offset, err = w.determineOffset(start) + if err == nil { + continue + } + } + } + if err != nil { + return nil, 0, false, err + } + + // Update the immediate stream's sent total and the upload offset with + // the data sent. + sent += len(data) + offset += int64(len(data)) + + // Not done sending data, do not attempt to commit it yet, loop around + // and send more data. + if recvd-sent > 0 { + continue + } + + // Done sending data. Close the stream to "commit" the data sent. + resp, finalized, err := w.commit() + // Retriable errors mean we should start over and attempt to + // resend the entire buffer via a new stream. + // If not retriable, falling through will return the error received + // from closing the stream. + if shouldRetry(err) { + sent = 0 + finishWrite = false + offset, err = w.determineOffset(start) + if err == nil { + continue + } + } + if err != nil { + return nil, 0, false, err + } + + return resp.GetResource(), offset, finalized, nil + } +} + +// determineOffset either returns the offset given to it in the case of a simple +// upload, or queries the write status in the case a resumable upload is being +// used. +func (w *gRPCWriter) determineOffset(offset int64) (int64, error) { + // For a Resumable Upload, we must start from however much data + // was committed. 
+ if w.upid != "" { + committed, err := w.queryProgress() + if err != nil { + return 0, err + } + offset = committed + } + return offset, nil +} + +// commit closes the stream to commit the data sent and potentially receive +// the finalized object if finished uploading. If the last request sent +// indicated that writing was finished, the Object will be finalized and +// returned. If not, then the Object will be nil, and the boolean returned will +// be false. +func (w *gRPCWriter) commit() (*storagepb.WriteObjectResponse, bool, error) { + finalized := true + resp, err := w.stream.CloseAndRecv() + if err == io.EOF { + // Closing a stream for a resumable upload finish_write = false results + // in an EOF which can be ignored, as we aren't done uploading yet. + finalized = false + err = nil + } + // Drop the stream reference as it has been closed. + w.stream = nil + + return resp, finalized, err +} + +// writeObjectSpec constructs a WriteObjectSpec proto using the Writer's +// ObjectAttrs and applies its Conditions. This is only used for gRPC. +func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) { + // To avoid modifying the ObjectAttrs embeded in the calling writer, deref + // the ObjectAttrs pointer to make a copy, then assign the desired name to + // the attribute. + attrs := *w.attrs + + spec := &storagepb.WriteObjectSpec{ + Resource: attrs.toProtoObject(w.bucket), + } + // WriteObject doesn't support the generation condition, so use default. + if err := applyCondsProto("WriteObject", defaultGen, w.conds, spec); err != nil { + return nil, err + } + return spec, nil +} + +// read copies the data in the reader to the given buffer and reports how much +// data was read into the buffer and if there is no more data to read (EOF). +// Furthermore, if the attrs.ContentType is unset, the first bytes of content +// will be sniffed for a matching content type. +func (w *gRPCWriter) read() (int, bool, error) { + if w.attrs.ContentType == "" { + w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader) + } + // Set n to -1 to start the Read loop. + var n, recvd int = -1, 0 + var err error + for err == nil && n != 0 { + // The routine blocks here until data is received. + n, err = w.reader.Read(w.buf[recvd:]) + recvd += n + } + var done bool + if err == io.EOF { + done = true + err = nil + } + return recvd, done, err +} + +func checkCanceled(err error) error { + if status.Code(err) == codes.Canceled { + return context.Canceled + } + + return err +} diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go index 7d8185f37b810..422a7c2335bdc 100644 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -20,6 +20,7 @@ import ( "fmt" "time" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" "google.golang.org/api/iterator" raw "google.golang.org/api/storage/v1" ) @@ -89,8 +90,8 @@ type HMACKey struct { type HMACKeyHandle struct { projectID string accessID string - - raw *raw.ProjectsHmacKeysService + retry *retryConfig + tc storageClient } // HMACKeyHandle creates a handle that will be used for HMACKey operations. 
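The gRPC methods above complete grpc_client.go; the hmac.go hunks that follow move HMAC-key operations onto the same transport-agnostic storageClient interface. As a rough, caller-side sketch (not part of the diff) of the write/read path these methods ultimately serve — the bucket and object names are placeholders, and with the default HTTP transport the equivalent calls go through http_client.go further below, while a gRPC-backed client reaches grpcStorageClient.OpenWriter and NewRangeReader shown above:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"strings"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()

	// NewClient uses the HTTP/JSON transport by default; the vendored gRPC
	// implementation above is reached through the same storageClient
	// interface when a gRPC-based client is constructed instead.
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// "my-bucket" and "my-object" are placeholders.
	obj := client.Bucket("my-bucket").Object("my-object")

	// Writes are buffered and flushed in chunks; on the gRPC path this is
	// the data that ends up in gRPCWriter.uploadBuffer above.
	w := obj.NewWriter(ctx)
	if _, err := io.Copy(w, strings.NewReader("hello world")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil { // Close commits the upload.
		log.Fatal(err)
	}

	// Reads are served by the range-reader path (NewRangeReader).
	r, err := obj.NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	data, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes\n", len(data))
}
```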
@@ -100,7 +101,8 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { return &HMACKeyHandle{ projectID: projectID, accessID: accessID, - raw: raw.NewProjectsHmacKeysService(c.raw), + retry: c.retry, + tc: c.tc, } } @@ -112,32 +114,15 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { // // This method is EXPERIMENTAL and subject to change or removal without notice. func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) { - call := hkh.raw.Get(hkh.projectID, hkh.accessID) - desc := new(hmacKeyDesc) for _, opt := range opts { opt.withHMACKeyDesc(desc) } - if desc.userProjectID != "" { - call = call.UserProject(desc.userProjectID) - } - setClientHeader(call.Header()) - - var metadata *raw.HmacKeyMetadata - var err error - err = runWithRetry(ctx, func() error { - metadata, err = call.Context(ctx).Do() - return err - }) - if err != nil { - return nil, err - } + o := makeStorageOpts(true, hkh.retry, desc.userProjectID) + hk, err := hkh.tc.GetHMACKey(ctx, hkh.projectID, hkh.accessID, o...) - hkPb := &raw.HmacKey{ - Metadata: metadata, - } - return pbHmacKeyToHMACKey(hkPb, false) + return hk, err } // Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage. @@ -146,49 +131,59 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC // // This method is EXPERIMENTAL and subject to change or removal without notice. func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error { - delCall := hkh.raw.Delete(hkh.projectID, hkh.accessID) desc := new(hmacKeyDesc) for _, opt := range opts { opt.withHMACKeyDesc(desc) } - if desc.userProjectID != "" { - delCall = delCall.UserProject(desc.userProjectID) - } - setClientHeader(delCall.Header()) - return runWithRetry(ctx, func() error { - return delCall.Context(ctx).Do() - }) + o := makeStorageOpts(true, hkh.retry, desc.userProjectID) + return hkh.tc.DeleteHMACKey(ctx, hkh.projectID, hkh.accessID, o...) 
} -func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) { - pbmd := pb.Metadata - if pbmd == nil { +func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) { + hkmd := hk.Metadata + if hkmd == nil { return nil, errors.New("field Metadata cannot be nil") } - createdTime, err := time.Parse(time.RFC3339, pbmd.TimeCreated) + createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated) if err != nil { - return nil, fmt.Errorf("field CreatedTime: %v", err) + return nil, fmt.Errorf("field CreatedTime: %w", err) } - updatedTime, err := time.Parse(time.RFC3339, pbmd.Updated) + updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated) if err != nil && !updatedTimeCanBeNil { - return nil, fmt.Errorf("field UpdatedTime: %v", err) + return nil, fmt.Errorf("field UpdatedTime: %w", err) } - hmk := &HMACKey{ - AccessID: pbmd.AccessId, - Secret: pb.Secret, - Etag: pbmd.Etag, - ID: pbmd.Id, - State: HMACState(pbmd.State), - ProjectID: pbmd.ProjectId, + hmKey := &HMACKey{ + AccessID: hkmd.AccessId, + Secret: hk.Secret, + Etag: hkmd.Etag, + ID: hkmd.Id, + State: HMACState(hkmd.State), + ProjectID: hkmd.ProjectId, CreatedTime: createdTime, UpdatedTime: updatedTime, - ServiceAccountEmail: pbmd.ServiceAccountEmail, + ServiceAccountEmail: hkmd.ServiceAccountEmail, } - return hmk, nil + return hmKey, nil +} + +func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey { + if pbmd == nil { + return nil + } + + return &HMACKey{ + AccessID: pbmd.GetAccessId(), + ID: pbmd.GetId(), + State: HMACState(pbmd.GetState()), + ProjectID: pbmd.GetProject(), + CreatedTime: convertProtoTime(pbmd.GetCreateTime()), + UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()), + ServiceAccountEmail: pbmd.GetServiceAccountEmail(), + } } // CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey. @@ -202,29 +197,14 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma return nil, errors.New("storage: expecting a non-blank service account email") } - svc := raw.NewProjectsHmacKeysService(c.raw) - call := svc.Create(projectID, serviceAccountEmail) desc := new(hmacKeyDesc) for _, opt := range opts { opt.withHMACKeyDesc(desc) } - if desc.userProjectID != "" { - call = call.UserProject(desc.userProjectID) - } - - setClientHeader(call.Header()) - - var hkPb *raw.HmacKey - var err error - err = runWithRetry(ctx, func() error { - hkPb, err = call.Context(ctx).Do() - return err - }) - if err != nil { - return nil, err - } - return pbHmacKeyToHMACKey(hkPb, true) + o := makeStorageOpts(false, c.retry, desc.userProjectID) + hk, err := c.tc.CreateHMACKey(ctx, projectID, serviceAccountEmail, o...) + return hk, err } // HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated. 
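These hunks route HMACKeyHandle.Get/Delete and Client.CreateHMACKey through the new tc storage client with makeStorageOpts-derived retry settings instead of building raw JSON-service calls inline; the surface remains marked EXPERIMENTAL. A minimal usage sketch of the affected public API, assuming placeholder project and service-account values:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Placeholder identifiers.
	const projectID = "my-project"
	const serviceAccount = "sa@my-project.iam.gserviceaccount.com"

	// CreateHMACKey now delegates to c.tc.CreateHMACKey (HTTP or gRPC).
	key, err := client.CreateHMACKey(ctx, projectID, serviceAccount)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access id:", key.AccessID)

	// Get delegates to hkh.tc.GetHMACKey using the handle's retry config.
	hkh := client.HMACKeyHandle(projectID, key.AccessID)
	if _, err := hkh.Get(ctx); err != nil {
		log.Fatal(err)
	}

	// Listing returns an iterator built by the transport client.
	it := client.ListHMACKeys(ctx, projectID)
	for {
		k, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(k.AccessID, k.State)
	}
}
```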
@@ -246,34 +226,15 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opt return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive) } - call := h.raw.Update(h.projectID, h.accessID, &raw.HmacKeyMetadata{ - Etag: au.Etag, - State: string(au.State), - }) - desc := new(hmacKeyDesc) for _, opt := range opts { opt.withHMACKeyDesc(desc) } - if desc.userProjectID != "" { - call = call.UserProject(desc.userProjectID) - } - setClientHeader(call.Header()) - var metadata *raw.HmacKeyMetadata - var err error - err = runWithRetry(ctx, func() error { - metadata, err = call.Context(ctx).Do() - return err - }) - - if err != nil { - return nil, err - } - hkPb := &raw.HmacKey{ - Metadata: metadata, - } - return pbHmacKeyToHMACKey(hkPb, false) + isIdempotent := len(au.Etag) > 0 + o := makeStorageOpts(isIdempotent, h.retry, desc.userProjectID) + hk, err := h.tc.UpdateHMACKey(ctx, h.projectID, desc.forServiceAccountEmail, h.accessID, &au, o...) + return hk, err } // An HMACKeysIterator is an iterator over HMACKeys. @@ -290,6 +251,7 @@ type HMACKeysIterator struct { nextFunc func() error index int desc hmacKeyDesc + retry *retryConfig } // ListHMACKeys returns an iterator for listing HMACKeys. @@ -298,26 +260,13 @@ type HMACKeysIterator struct { // // This method is EXPERIMENTAL and subject to change or removal without notice. func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator { - it := &HMACKeysIterator{ - ctx: ctx, - raw: raw.NewProjectsHmacKeysService(c.raw), - projectID: projectID, - } - + desc := new(hmacKeyDesc) for _, opt := range opts { - opt.withHMACKeyDesc(&it.desc) + opt.withHMACKeyDesc(desc) } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - it.fetch, - func() int { return len(it.hmacKeys) - it.index }, - func() interface{} { - prev := it.hmacKeys - it.hmacKeys = it.hmacKeys[:0] - it.index = 0 - return prev - }) - return it + o := makeStorageOpts(true, c.retry, desc.userProjectID) + return c.tc.ListHMACKeys(ctx, projectID, desc.forServiceAccountEmail, desc.showDeletedKeys, o...) } // Next returns the next result. Its second return value is iterator.Done if @@ -346,6 +295,8 @@ func (it *HMACKeysIterator) Next() (*HMACKey, error) { func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) { + // TODO: Remove fetch method upon integration. This method is internalized into + // httpStorageClient.ListHMACKeys() as it is the only caller. 
call := it.raw.List(it.projectID) setClientHeader(call.Header()) if pageToken != "" { @@ -366,19 +317,19 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, ctx := it.ctx var resp *raw.HmacKeysMetadata - err = runWithRetry(it.ctx, func() error { + err = run(it.ctx, func() error { resp, err = call.Context(ctx).Do() return err - }) + }, it.retry, true, setRetryHeaderHTTP(call)) if err != nil { return "", err } for _, metadata := range resp.Items { - hkPb := &raw.HmacKey{ + hk := &raw.HmacKey{ Metadata: metadata, } - hkey, err := pbHmacKeyToHMACKey(hkPb, true) + hkey, err := toHMACKeyFromRaw(hk, true) if err != nil { return "", err } diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go new file mode 100644 index 0000000000000..fae96043a92a1 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -0,0 +1,1351 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "reflect" + "strconv" + "strings" + "time" + + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/trace" + "golang.org/x/oauth2/google" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + raw "google.golang.org/api/storage/v1" + "google.golang.org/api/transport" + htransport "google.golang.org/api/transport/http" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +// httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic +// storageClient interface. +// +// This is an experimental API and not intended for public use. +type httpStorageClient struct { + creds *google.Credentials + hc *http.Client + readHost string + raw *raw.Service + scheme string + settings *settings +} + +// newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON +// Storage API. +// +// This is an experimental API and not intended for public use. +func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { + s := initSettings(opts...) + o := s.clientOption + + var creds *google.Credentials + // In general, it is recommended to use raw.NewService instead of htransport.NewClient + // since raw.NewService configures the correct default endpoints when initializing the + // internal http client. However, in our case, "NewRangeReader" in reader.go needs to + // access the http client directly to make requests, so we create the client manually + // here so it can be re-used by both reader.go and raw.NewService. This means we need to + // manually configure the default endpoint options on the http client. Furthermore, we + // need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints. 
+ if host := os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { + // Prepend default options to avoid overriding options passed by the user. + o = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, o...) + + o = append(o, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/")) + o = append(o, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/")) + + // Don't error out here. The user may have passed in their own HTTP + // client which does not auth with ADC or other common conventions. + c, err := transport.Creds(ctx, o...) + if err == nil { + creds = c + o = append(o, internaloption.WithCredentials(creds)) + } + } else { + var hostURL *url.URL + + if strings.Contains(host, "://") { + h, err := url.Parse(host) + if err != nil { + return nil, err + } + hostURL = h + } else { + // Add scheme for user if not supplied in STORAGE_EMULATOR_HOST + // URL is only parsed correctly if it has a scheme, so we build it ourselves + hostURL = &url.URL{Scheme: "http", Host: host} + } + + hostURL.Path = "storage/v1/" + endpoint := hostURL.String() + + // Append the emulator host as default endpoint for the user + o = append([]option.ClientOption{option.WithoutAuthentication()}, o...) + + o = append(o, internaloption.WithDefaultEndpoint(endpoint)) + o = append(o, internaloption.WithDefaultMTLSEndpoint(endpoint)) + } + s.clientOption = o + + // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. + hc, ep, err := htransport.NewClient(ctx, s.clientOption...) + if err != nil { + return nil, fmt.Errorf("dialing: %w", err) + } + // RawService should be created with the chosen endpoint to take account of user override. + rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc)) + if err != nil { + return nil, fmt.Errorf("storage client: %w", err) + } + // Update readHost and scheme with the chosen endpoint. + u, err := url.Parse(ep) + if err != nil { + return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) + } + + return &httpStorageClient{ + creds: creds, + hc: hc, + readHost: u.Host, + raw: rawService, + scheme: u.Scheme, + settings: s, + }, nil +} + +func (c *httpStorageClient) Close() error { + c.hc.CloseIdleConnections() + return nil +} + +// Top-level methods. + +func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.ServiceAccount.Get(project) + var res *raw.ServiceAccount + err := run(ctx, func() error { + var err error + res, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + if err != nil { + return "", err + } + return res.EmailAddress, nil +} + +func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) + var bkt *raw.Bucket + if attrs != nil { + bkt = attrs.toRawBucket() + } else { + bkt = &raw.Bucket{} + } + bkt.Name = bucket + // If there is lifecycle information but no location, explicitly set + // the location. This is a GCS quirk/bug. 
+ if bkt.Location == "" && bkt.Lifecycle != nil { + bkt.Location = "US" + } + req := c.raw.Buckets.Insert(project, bkt) + setClientHeader(req.Header()) + if attrs != nil && attrs.PredefinedACL != "" { + req.PredefinedAcl(attrs.PredefinedACL) + } + if attrs != nil && attrs.PredefinedDefaultObjectACL != "" { + req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) + } + var battrs *BucketAttrs + err := run(ctx, func() error { + b, err := req.Context(ctx).Do() + if err != nil { + return err + } + battrs, err = newBucket(b) + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return battrs, err +} + +func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator { + s := callSettings(c.settings, opts...) + it := &BucketIterator{ + ctx: ctx, + projectID: project, + } + + fetch := func(pageSize int, pageToken string) (token string, err error) { + req := c.raw.Buckets.List(it.projectID) + setClientHeader(req.Header()) + req.Projection("full") + req.Prefix(it.Prefix) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Buckets + err = run(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + if err != nil { + return "", err + } + for _, item := range resp.Items { + b, err := newBucket(item) + if err != nil { + return "", err + } + it.buckets = append(it.buckets, b) + } + return resp.NextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + + return it +} + +// Bucket methods. + +func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.Buckets.Delete(bucket) + setClientHeader(req.Header()) + if err := applyBucketConds("httpStorageClient.DeleteBucket", conds, req); err != nil { + return err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + + return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) + req := c.raw.Buckets.Get(bucket).Projection("full") + setClientHeader(req.Header()) + err := applyBucketConds("httpStorageClient.GetBucket", conds, req) + if err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + + var resp *raw.Bucket + err = run(ctx, func() error { + resp, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return nil, ErrBucketNotExist + } + if err != nil { + return nil, err + } + return newBucket(resp) +} +func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) 
+ rb := uattrs.toRawBucket() + req := c.raw.Buckets.Patch(bucket, rb).Projection("full") + setClientHeader(req.Header()) + err := applyBucketConds("httpStorageClient.UpdateBucket", conds, req) + if err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + if uattrs != nil && uattrs.PredefinedACL != "" { + req.PredefinedAcl(uattrs.PredefinedACL) + } + if uattrs != nil && uattrs.PredefinedDefaultObjectACL != "" { + req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL) + } + + var rawBucket *raw.Bucket + err = run(ctx, func() error { + rawBucket, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + if err != nil { + return nil, err + } + return newBucket(rawBucket) +} + +func (c *httpStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + + var metageneration int64 + if conds != nil { + metageneration = conds.MetagenerationMatch + } + req := c.raw.Buckets.LockRetentionPolicy(bucket, metageneration) + + return run(ctx, func() error { + _, err := req.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} +func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { + s := callSettings(c.settings, opts...) + it := &ObjectIterator{ + ctx: ctx, + } + if q != nil { + it.query = *q + } + fetch := func(pageSize int, pageToken string) (string, error) { + req := c.raw.Objects.List(bucket) + setClientHeader(req.Header()) + projection := it.query.Projection + if projection == ProjectionDefault { + projection = ProjectionFull + } + req.Projection(projection.String()) + req.Delimiter(it.query.Delimiter) + req.Prefix(it.query.Prefix) + req.StartOffset(it.query.StartOffset) + req.EndOffset(it.query.EndOffset) + req.Versions(it.query.Versions) + req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter) + if selection := it.query.toFieldSelection(); selection != "" { + req.Fields("nextPageToken", googleapi.Field(selection)) + } + req.PageToken(pageToken) + if s.userProject != "" { + req.UserProject(s.userProject) + } + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Objects + var err error + err = run(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + if err != nil { + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + err = ErrBucketNotExist + } + return "", err + } + for _, item := range resp.Items { + it.items = append(it.items, newObject(item)) + } + for _, prefix := range resp.Prefixes { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + return resp.NextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + + return it +} + +// Object metadata methods. + +func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) 
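The ListObjects iterator above is what BucketHandle.Objects drives; here is a sketch of a prefixed, delimited listing through the public API (bucket name and prefix are placeholders).

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Prefix/Delimiter map to the req.Prefix/req.Delimiter calls above;
	// synthetic "directory" entries come back with only the Prefix field set.
	q := &storage.Query{Prefix: "logs/2023/", Delimiter: "/"}
	it := client.Bucket("example-bucket").Objects(ctx, q)
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if attrs.Prefix != "" {
			fmt.Println("prefix:", attrs.Prefix)
			continue
		}
		fmt.Println("object:", attrs.Name)
	}
}
```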
+ req := c.raw.Objects.Delete(bucket, object).Context(ctx) + if err := applyConds("Delete", gen, conds, req); err != nil { + return err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + err := run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + return err +} + +func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + req := c.raw.Objects.Get(bucket, object).Projection("full").Context(ctx) + if err := applyConds("Attrs", gen, conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + if err := setEncryptionHeaders(req.Header(), encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + err = run(ctx, func() error { + obj, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + + var attrs ObjectAttrs + // Lists of fields to send, and set to null, in the JSON. + var forceSendFields, nullFields []string + if uattrs.ContentType != nil { + attrs.ContentType = optional.ToString(uattrs.ContentType) + // For ContentType, sending the empty string is a no-op. + // Instead we send a null. + if attrs.ContentType == "" { + nullFields = append(nullFields, "ContentType") + } else { + forceSendFields = append(forceSendFields, "ContentType") + } + } + if uattrs.ContentLanguage != nil { + attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + // For ContentLanguage it's an error to send the empty string. + // Instead we send a null. 
+ if attrs.ContentLanguage == "" { + nullFields = append(nullFields, "ContentLanguage") + } else { + forceSendFields = append(forceSendFields, "ContentLanguage") + } + } + if uattrs.ContentEncoding != nil { + attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + forceSendFields = append(forceSendFields, "ContentEncoding") + } + if uattrs.ContentDisposition != nil { + attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + forceSendFields = append(forceSendFields, "ContentDisposition") + } + if uattrs.CacheControl != nil { + attrs.CacheControl = optional.ToString(uattrs.CacheControl) + forceSendFields = append(forceSendFields, "CacheControl") + } + if uattrs.EventBasedHold != nil { + attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold) + forceSendFields = append(forceSendFields, "EventBasedHold") + } + if uattrs.TemporaryHold != nil { + attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) + forceSendFields = append(forceSendFields, "TemporaryHold") + } + if !uattrs.CustomTime.IsZero() { + attrs.CustomTime = uattrs.CustomTime + forceSendFields = append(forceSendFields, "CustomTime") + } + if uattrs.Metadata != nil { + attrs.Metadata = uattrs.Metadata + if len(attrs.Metadata) == 0 { + // Sending the empty map is a no-op. We send null instead. + nullFields = append(nullFields, "Metadata") + } else { + forceSendFields = append(forceSendFields, "Metadata") + } + } + if uattrs.ACL != nil { + attrs.ACL = uattrs.ACL + // It's an error to attempt to delete the ACL, so + // we don't append to nullFields here. + forceSendFields = append(forceSendFields, "Acl") + } + rawObj := attrs.toRawObject(bucket) + rawObj.ForceSendFields = forceSendFields + rawObj.NullFields = nullFields + call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full").Context(ctx) + if err := applyConds("Update", gen, conds, call); err != nil { + return nil, err + } + if s.userProject != "" { + call.UserProject(s.userProject) + } + if uattrs.PredefinedACL != "" { + call.PredefinedAcl(uattrs.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + err = run(ctx, func() error { obj, err = call.Do(); return err }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + var e *googleapi.Error + if errors.As(err, &e) && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// Default Object ACL methods. + +func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.DefaultObjectAccessControls.Delete(bucket, string(entity)) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { + s := callSettings(c.settings, opts...) 
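The ForceSendFields/NullFields bookkeeping in UpdateObject above is what gives the public ObjectAttrsToUpdate its "zero value deletes the field" semantics; a sketch under that assumption (bucket and object names are placeholders).

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("example-bucket").Object("example-object")

	// Non-empty values land in ForceSendFields; an empty ContentType or an
	// empty-but-non-nil Metadata map lands in NullFields, clearing the field.
	_, err = obj.Update(ctx, storage.ObjectAttrsToUpdate{
		ContentType:  "text/plain",           // set
		CacheControl: "public, max-age=60",   // set
		Metadata:     map[string]string{},    // delete all custom metadata
	})
	if err != nil {
		log.Fatal(err)
	}
}
```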
+ var acls *raw.ObjectAccessControls + var err error + req := c.raw.DefaultObjectAccessControls.List(bucket) + configureACLCall(ctx, s.userProject, req) + err = run(ctx, func() error { + acls, err = req.Do() + return err + }, s.retry, true, setRetryHeaderHTTP(req)) + if err != nil { + return nil, err + } + return toObjectACLRules(acls.Items), nil +} +func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + acl := &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + var req setRequest + var err error + req = c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func() error { + _, err = req.Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +// Bucket ACL methods. + +func (c *httpStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.BucketAccessControls.Delete(bucket, string(entity)) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { + s := callSettings(c.settings, opts...) + var acls *raw.BucketAccessControls + var err error + req := c.raw.BucketAccessControls.List(bucket) + configureACLCall(ctx, s.userProject, req) + err = run(ctx, func() error { + acls, err = req.Do() + return err + }, s.retry, true, setRetryHeaderHTTP(req)) + if err != nil { + return nil, err + } + return toBucketACLRules(acls.Items), nil +} + +func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + acl := &raw.BucketAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + req := c.raw.BucketAccessControls.Update(bucket, string(entity), acl) + configureACLCall(ctx, s.userProject, req) + var err error + return run(ctx, func() error { + _, err = req.Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +// configureACLCall sets the context, user project and headers on the apiary library call. +// This will panic if the call does not have the correct methods. +func configureACLCall(ctx context.Context, userProject string, call interface{ Header() http.Header }) { + vc := reflect.ValueOf(call) + vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) + if userProject != "" { + vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(userProject)}) + } + setClientHeader(call.Header()) +} + +// Object ACL methods. + +func (c *httpStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.ObjectAccessControls.Delete(bucket, object, string(entity)) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +// ListObjectACLs retrieves object ACL entries. 
By default, it operates on the latest generation of this object. +// Selecting a specific generation of this object is not currently supported by the client. +func (c *httpStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { + s := callSettings(c.settings, opts...) + var acls *raw.ObjectAccessControls + var err error + req := c.raw.ObjectAccessControls.List(bucket, object) + configureACLCall(ctx, s.userProject, req) + err = run(ctx, func() error { + acls, err = req.Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + if err != nil { + return nil, err + } + return toObjectACLRules(acls.Items), nil +} + +func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + + acl := &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + var req setRequest + var err error + req = c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func() error { + _, err = req.Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +// Media operations. + +func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + rawReq := &raw.ComposeRequest{} + // Compose requires a non-empty Destination, so we always set it, + // even if the caller-provided ObjectAttrs is the zero value. + rawReq.Destination = req.dstObject.attrs.toRawObject(req.dstBucket) + if req.sendCRC32C { + rawReq.Destination.Crc32c = encodeUint32(req.dstObject.attrs.CRC32C) + } + for _, src := range req.srcs { + srcObj := &raw.ComposeRequestSourceObjects{ + Name: src.name, + } + if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { + return nil, err + } + rawReq.SourceObjects = append(rawReq.SourceObjects, srcObj) + } + + call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq).Context(ctx) + if err := applyConds("ComposeFrom destination", defaultGen, req.dstObject.conds, call); err != nil { + return nil, err + } + if s.userProject != "" { + call.UserProject(s.userProject) + } + if req.predefinedACL != "" { + call.DestinationPredefinedAcl(req.predefinedACL) + } + if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + + var err error + retryCall := func() error { obj, err = call.Do(); return err } + + if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + return newObject(obj), nil +} +func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) { + s := callSettings(c.settings, opts...) 
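ComposeObject above is surfaced publicly through ObjectHandle.ComposerFrom; a sketch of stitching two source objects into one (object and bucket names are placeholders).

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("example-bucket")
	src1 := bkt.Object("part-000")
	src2 := bkt.Object("part-001")
	dst := bkt.Object("combined")

	// The composer populates the rawReq.Destination and SourceObjects fields
	// built in ComposeObject above; all sources must live in the same bucket.
	composer := dst.ComposerFrom(src1, src2)
	composer.ContentType = "application/octet-stream"
	if _, err := composer.Run(ctx); err != nil {
		log.Fatal(err)
	}
}
```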
+ rawObject := req.dstObject.attrs.toRawObject("") + call := c.raw.Objects.Rewrite(req.srcObject.bucket, req.srcObject.name, req.dstObject.bucket, req.dstObject.name, rawObject) + + call.Context(ctx).Projection("full") + if req.token != "" { + call.RewriteToken(req.token) + } + if req.dstObject.keyName != "" { + call.DestinationKmsKeyName(req.dstObject.keyName) + } + if req.predefinedACL != "" { + call.DestinationPredefinedAcl(req.predefinedACL) + } + if err := applyConds("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { + return nil, err + } + if err := applySourceConds(req.srcObject.gen, req.srcObject.conds, call); err != nil { + return nil, err + } + if s.userProject != "" { + call.UserProject(s.userProject) + } + // Set destination encryption headers. + if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil { + return nil, err + } + // Set source encryption headers. + if err := setEncryptionHeaders(call.Header(), req.srcObject.encryptionKey, true); err != nil { + return nil, err + } + + if req.maxBytesRewrittenPerCall != 0 { + call.MaxBytesRewrittenPerCall(req.maxBytesRewrittenPerCall) + } + + var res *raw.RewriteResponse + var err error + setClientHeader(call.Header()) + + retryCall := func() error { res, err = call.Do(); return err } + + if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + + r := &rewriteObjectResponse{ + done: res.Done, + written: res.TotalBytesRewritten, + size: res.ObjectSize, + token: res.RewriteToken, + resource: newObject(res.Resource), + } + + return r, nil +} + +func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.NewRangeReader") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + + u := &url.URL{ + Scheme: c.scheme, + Host: c.readHost, + Path: fmt.Sprintf("/%s/%s", params.bucket, params.object), + } + verb := "GET" + if params.length == 0 { + verb = "HEAD" + } + req, err := http.NewRequest(verb, u.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if s.userProject != "" { + req.Header.Set("X-Goog-User-Project", s.userProject) + } + if params.readCompressed { + req.Header.Set("Accept-Encoding", "gzip") + } + if err := setEncryptionHeaders(req.Header, params.encryptionKey, false); err != nil { + return nil, err + } + + // Define a function that initiates a Read with offset and length, assuming we + // have already read seen bytes. + reopen := func(seen int64) (*http.Response, error) { + // If the context has already expired, return immediately without making a + // call. + if err := ctx.Err(); err != nil { + return nil, err + } + start := params.offset + seen + if params.length < 0 && start < 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d", start)) + } else if params.length < 0 && start > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) + } else if params.length > 0 { + // The end character isn't affected by how many bytes we've seen. + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, params.offset+params.length-1)) + } + // We wait to assign conditions here because the generation number can change in between reopen() runs. 
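RewriteObject above is the single-shot call; the public Copier is what loops on the returned rewrite token until the service reports done. A sketch of that usage (bucket and object names are placeholders).

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	src := client.Bucket("example-src").Object("big-object")
	dst := client.Bucket("example-dst").Object("big-object-copy")

	// Copier.Run issues RewriteObject calls repeatedly, feeding the
	// RewriteToken from each response back in until done is true.
	attrs, err := dst.CopierFrom(src).Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("copied", attrs.Size, "bytes")
}
```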
+ if err := setConditionsHeaders(req.Header, params.conds); err != nil { + return nil, err + } + // If an object generation is specified, include generation as query string parameters. + if params.gen >= 0 { + req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) + } + + var res *http.Response + err = run(ctx, func() error { + res, err = c.hc.Do(req) + if err != nil { + return err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + + partialContentNotSatisfied := + !decompressiveTranscoding(res) && + start > 0 && params.length != 0 && + res.StatusCode != http.StatusPartialContent + + if partialContentNotSatisfied { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + + // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves + // back the whole file regardless of the range count passed in as per: + // https://cloud.google.com/storage/docs/transcoding#range, + // thus we have to manually move the body forward by seen bytes. + if decompressiveTranscoding(res) && seen > 0 { + _, _ = io.CopyN(ioutil.Discard, res.Body, seen) + } + + // If a generation hasn't been specified, and this is the first response we get, let's record the + // generation. In future requests we'll use this generation as a precondition to avoid data races. + if params.gen < 0 && res.Header.Get("X-Goog-Generation") != "" { + gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) + if err != nil { + return err + } + params.gen = gen64 + } + return nil + }, s.retry, s.idempotent, setRetryHeaderHTTP(nil)) + if err != nil { + return nil, err + } + return res, nil + } + + res, err := reopen(0) + if err != nil { + return nil, err + } + var ( + size int64 // total size of object, even if a range was requested. + checkCRC bool + crc uint32 + startOffset int64 // non-zero if range request. + ) + if res.StatusCode == http.StatusPartialContent { + cr := strings.TrimSpace(res.Header.Get("Content-Range")) + if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + // Content range is formatted -/. We take + // the total size. + size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + + dashIndex := strings.Index(cr, "-") + if dashIndex >= 0 { + startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err) + } + } + } else { + size = res.ContentLength + // Check the CRC iff all of the following hold: + // - We asked for content (length != 0). + // - We got all the content (status != PartialContent). + // - The server sent a CRC header. + // - The Go http stack did not uncompress the file. + // - We were not served compressed data that was uncompressed on download. + // The problem with the last two cases is that the CRC will not match -- GCS + // computes it on the compressed contents, but we compute it on the + // uncompressed contents. 
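The Range header construction and Content-Range parsing above back ObjectHandle.NewRangeReader; a sketch of a ranged read through the public API (names and offsets are placeholders).

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("example-bucket").Object("example-object")

	// Read 1 KiB starting at offset 4096; the 206 response's Content-Range
	// is parsed above to recover the total object size and start offset.
	r, err := obj.NewRangeReader(ctx, 4096, 1024)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	data, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d of %d total bytes (range starts at %d)\n",
		len(data), r.Attrs.Size, r.Attrs.StartOffset)
}
```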
+ if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) { + crc, checkCRC = parseCRC32c(res) + } + } + + remain := res.ContentLength + body := res.Body + if params.length == 0 { + remain = 0 + body.Close() + body = emptyBody + } + var metaGen int64 + if res.Header.Get("X-Goog-Metageneration") != "" { + metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) + if err != nil { + return nil, err + } + } + + var lm time.Time + if res.Header.Get("Last-Modified") != "" { + lm, err = http.ParseTime(res.Header.Get("Last-Modified")) + if err != nil { + return nil, err + } + } + + attrs := ReaderObjectAttrs{ + Size: size, + ContentType: res.Header.Get("Content-Type"), + ContentEncoding: res.Header.Get("Content-Encoding"), + CacheControl: res.Header.Get("Cache-Control"), + LastModified: lm, + StartOffset: startOffset, + Generation: params.gen, + Metageneration: metaGen, + } + return &Reader{ + Attrs: attrs, + size: size, + remain: remain, + wantCRC: crc, + checkCRC: checkCRC, + reader: &httpReader{ + reopen: reopen, + body: body, + }, + }, nil +} + +func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { + s := callSettings(c.settings, opts...) + errorf := params.setError + setObj := params.setObj + progress := params.progress + attrs := params.attrs + + mediaOpts := []googleapi.MediaOption{ + googleapi.ChunkSize(params.chunkSize), + } + if c := attrs.ContentType; c != "" { + mediaOpts = append(mediaOpts, googleapi.ContentType(c)) + } + if params.chunkRetryDeadline != 0 { + mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(params.chunkRetryDeadline)) + } + + pr, pw := io.Pipe() + + go func() { + defer close(params.donec) + + rawObj := attrs.toRawObject(params.bucket) + if params.sendCRC32C { + rawObj.Crc32c = encodeUint32(attrs.CRC32C) + } + if attrs.MD5 != nil { + rawObj.Md5Hash = base64.StdEncoding.EncodeToString(attrs.MD5) + } + call := c.raw.Objects.Insert(params.bucket, rawObj). + Media(pr, mediaOpts...). + Projection("full"). + Context(params.ctx). + Name(params.attrs.Name) + call.ProgressUpdater(func(n, _ int64) { progress(n) }) + + if attrs.KMSKeyName != "" { + call.KmsKeyName(attrs.KMSKeyName) + } + if attrs.PredefinedACL != "" { + call.PredefinedAcl(attrs.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), params.encryptionKey, false); err != nil { + errorf(err) + pr.CloseWithError(err) + return + } + var resp *raw.Object + err := applyConds("NewWriter", defaultGen, params.conds, call) + if err == nil { + if s.userProject != "" { + call.UserProject(s.userProject) + } + // TODO(tritone): Remove this code when Uploads begin to support + // retry attempt header injection with "client header" injection. + setClientHeader(call.Header()) + + // The internals that perform call.Do automatically retry both the initial + // call to set up the upload as well as calls to upload individual chunks + // for a resumable upload (as long as the chunk size is non-zero). Hence + // there is no need to add retries here. + + // Retry only when the operation is idempotent or the retry policy is RetryAlways. 
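OpenWriter above feeds a resumable Objects.Insert call through a pipe, with chunk size, checksums, and the per-chunk retry deadline plumbed in from the public Writer. A sketch of that surface (names and sizes are placeholders).

```go
package main

import (
	"context"
	"hash/crc32"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	data := []byte("hello, world")

	w := client.Bucket("example-bucket").Object("greeting.txt").NewWriter(ctx)
	w.ChunkSize = 8 << 20                   // becomes googleapi.ChunkSize above
	w.ChunkRetryDeadline = 30 * time.Second // becomes googleapi.ChunkRetryDeadline
	w.ContentType = "text/plain"
	w.SendCRC32C = true // sendCRC32C -> rawObj.Crc32c above
	w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))

	if _, err := w.Write(data); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}
```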
+ var useRetry bool + if (s.retry == nil || s.retry.policy == RetryIdempotent) && s.idempotent { + useRetry = true + } else if s.retry != nil && s.retry.policy == RetryAlways { + useRetry = true + } + if useRetry { + if s.retry != nil { + call.WithRetry(s.retry.backoff, s.retry.shouldRetry) + } else { + call.WithRetry(nil, nil) + } + } + resp, err = call.Do() + } + if err != nil { + errorf(err) + pr.CloseWithError(err) + return + } + setObj(newObject(resp)) + }() + + return pw, nil +} + +// IAM methods. + +func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(version)) + setClientHeader(call.Header()) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var rp *raw.Policy + err := run(ctx, func() error { + var err error + rp, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + if err != nil { + return nil, err + } + return iamFromStoragePolicy(rp), nil +} + +func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + + rp := iamToStoragePolicy(policy) + call := c.raw.Buckets.SetIamPolicy(resource, rp) + setClientHeader(call.Header()) + if s.userProject != "" { + call.UserProject(s.userProject) + } + + return run(ctx, func() error { + _, err := call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) +} + +func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Buckets.TestIamPermissions(resource, permissions) + setClientHeader(call.Header()) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var res *raw.TestIamPermissionsResponse + err := run(ctx, func() error { + var err error + res, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// HMAC Key methods. + +func (c *httpStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Get(project, accessID) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + + var metadata *raw.HmacKeyMetadata + var err error + if err := run(ctx, func() error { + metadata, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + hk := &raw.HmacKey{ + Metadata: metadata, + } + return toHMACKeyFromRaw(hk, false) +} + +func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { + s := callSettings(c.settings, opts...) 
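The useRetry decision above honours the per-handle retry configuration; below is a sketch of setting that configuration through the public Retryer option (bucket, object, and backoff values are illustrative).

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
	"github.com/googleapis/gax-go/v2"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// RetryAlways makes even non-idempotent calls (e.g. unconditional
	// uploads) take the retry branch above; the backoff feeds call.WithRetry.
	obj := client.Bucket("example-bucket").Object("example-object").Retryer(
		storage.WithPolicy(storage.RetryAlways),
		storage.WithBackoff(gax.Backoff{
			Initial:    2 * time.Second,
			Max:        30 * time.Second,
			Multiplier: 2,
		}),
	)
	_ = obj // use the configured handle for subsequent reads and writes
}
```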
+ it := &HMACKeysIterator{ + ctx: ctx, + raw: c.raw.Projects.HmacKeys, + projectID: project, + retry: s.retry, + } + fetch := func(pageSize int, pageToken string) (token string, err error) { + call := c.raw.Projects.HmacKeys.List(project) + setClientHeader(call.Header()) + if pageToken != "" { + call = call.PageToken(pageToken) + } + if pageSize > 0 { + call = call.MaxResults(int64(pageSize)) + } + if showDeletedKeys { + call = call.ShowDeletedKeys(true) + } + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + if serviceAccountEmail != "" { + call = call.ServiceAccountEmail(serviceAccountEmail) + } + + var resp *raw.HmacKeysMetadata + err = run(it.ctx, func() error { + resp, err = call.Context(it.ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + if err != nil { + return "", err + } + + for _, metadata := range resp.Items { + hk := &raw.HmacKey{ + Metadata: metadata, + } + hkey, err := toHMACKeyFromRaw(hk, true) + if err != nil { + return "", err + } + it.hmacKeys = append(it.hmacKeys, hkey) + } + return resp.NextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.hmacKeys) - it.index }, + func() interface{} { + prev := it.hmacKeys + it.hmacKeys = it.hmacKeys[:0] + it.index = 0 + return prev + }) + return it +} + +func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Update(project, accessID, &raw.HmacKeyMetadata{ + Etag: attrs.Etag, + State: string(attrs.State), + }) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + + var metadata *raw.HmacKeyMetadata + var err error + if err := run(ctx, func() error { + metadata, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + hk := &raw.HmacKey{ + Metadata: metadata, + } + return toHMACKeyFromRaw(hk, false) +} + +func (c *httpStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Create(project, serviceAccountEmail) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + + var hk *raw.HmacKey + if err := run(ctx, func() error { + h, err := call.Context(ctx).Do() + hk = h + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + return toHMACKeyFromRaw(hk, true) +} + +func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Delete(project, accessID) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + return run(ctx, func() error { + return call.Context(ctx).Do() + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) +} + +// Notification methods. + +// ListNotifications returns all the Notifications configured for this bucket, as a map indexed by notification ID. +// +// Note: This API does not support pagination. However, entity limits cap the number of notifications on a single bucket, +// so all results will be returned in the first response. See https://cloud.google.com/storage/quotas#buckets. 
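The HMAC key plumbing above maps onto Client.CreateHMACKey and Client.ListHMACKeys; a sketch of that public surface (project and service-account values are placeholders).

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Creation is the only call that returns the key's secret.
	key, err := client.CreateHMACKey(ctx, "example-project",
		"sa@example-project.iam.gserviceaccount.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access id:", key.AccessID)

	// Listing pages through the HMACKeysIterator built above.
	it := client.ListHMACKeys(ctx, "example-project")
	for {
		k, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(k.AccessID, k.State)
	}
}
```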
+func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.ListNotifications") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + call := c.raw.Notifications.List(bucket) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var res *raw.Notifications + err = run(ctx, func() error { + res, err = call.Context(ctx).Do() + return err + }, s.retry, true, setRetryHeaderHTTP(call)) + if err != nil { + return nil, err + } + return notificationsToMap(res.Items), nil +} + +func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.CreateNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + call := c.raw.Notifications.Insert(bucket, toRawNotification(n)) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var rn *raw.Notification + err = run(ctx, func() error { + rn, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + if err != nil { + return nil, err + } + return toNotification(rn), nil +} + +func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + call := c.raw.Notifications.Delete(bucket, id) + if s.userProject != "" { + call.UserProject(s.userProject) + } + return run(ctx, func() error { + return call.Context(ctx).Do() + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) +} + +type httpReader struct { + body io.ReadCloser + seen int64 + reopen func(seen int64) (*http.Response, error) +} + +func (r *httpReader) Read(p []byte) (int, error) { + n := 0 + for len(p[n:]) > 0 { + m, err := r.body.Read(p[n:]) + n += m + r.seen += int64(m) + if err == nil || err == io.EOF { + return n, err + } + // Read failed (likely due to connection issues), but we will try to reopen + // the pipe and continue. Send a ranged read request that takes into account + // the number of bytes we've already seen. + res, err := r.reopen(r.seen) + if err != nil { + // reopen already retries + return n, err + } + r.body.Close() + r.body = res.Body + } + return n, nil +} + +func (r *httpReader) Close() error { + return r.body.Close() +} diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go index 5caefb059d595..408661718fcb7 100644 --- a/vendor/cloud.google.com/go/storage/iam.go +++ b/vendor/cloud.google.com/go/storage/iam.go @@ -27,15 +27,17 @@ import ( // IAM provides access to IAM access control for the bucket. func (b *BucketHandle) IAM() *iam.Handle { return iam.InternalNewHandleClient(&iamClient{ - raw: b.c.raw, userProject: b.userProject, + retry: b.retry, + client: b.c, }, b.name) } // iamClient implements the iam.client interface. 
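These notification methods back BucketHandle.AddNotification, Notifications, and DeleteNotification; a sketch of that flow (bucket, project, and topic names are placeholders).

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("example-bucket")

	// CreateNotification path above.
	n, err := bkt.AddNotification(ctx, &storage.Notification{
		TopicProjectID: "example-project",
		TopicID:        "storage-events",
		PayloadFormat:  storage.JSONPayload,
	})
	if err != nil {
		log.Fatal(err)
	}

	// ListNotifications path: a single page, keyed by notification ID.
	all, err := bkt.Notifications(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("configured notifications:", len(all))

	// DeleteNotification path.
	if err := bkt.DeleteNotification(ctx, n.ID); err != nil {
		log.Fatal(err)
	}
}
```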
type iamClient struct { - raw *raw.Service userProject string + retry *retryConfig + client *Client } func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) { @@ -46,56 +48,25 @@ func (c *iamClient) GetWithVersion(ctx context.Context, resource string, request ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") defer func() { trace.EndSpan(ctx, err) }() - call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(requestedPolicyVersion)) - setClientHeader(call.Header()) - if c.userProject != "" { - call.UserProject(c.userProject) - } - var rp *raw.Policy - err = runWithRetry(ctx, func() error { - rp, err = call.Context(ctx).Do() - return err - }) - if err != nil { - return nil, err - } - return iamFromStoragePolicy(rp), nil + o := makeStorageOpts(true, c.retry, c.userProject) + return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...) } func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set") defer func() { trace.EndSpan(ctx, err) }() - rp := iamToStoragePolicy(p) - call := c.raw.Buckets.SetIamPolicy(resource, rp) - setClientHeader(call.Header()) - if c.userProject != "" { - call.UserProject(c.userProject) - } - return runWithRetry(ctx, func() error { - _, err := call.Context(ctx).Do() - return err - }) + isIdempotent := len(p.Etag) > 0 + o := makeStorageOpts(isIdempotent, c.retry, c.userProject) + return c.client.tc.SetIamPolicy(ctx, resource, p, o...) } func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test") defer func() { trace.EndSpan(ctx, err) }() - call := c.raw.Buckets.TestIamPermissions(resource, perms) - setClientHeader(call.Header()) - if c.userProject != "" { - call.UserProject(c.userProject) - } - var res *raw.TestIamPermissionsResponse - err = runWithRetry(ctx, func() error { - res, err = call.Context(ctx).Do() - return err - }) - if err != nil { - return nil, err - } - return res.Permissions, nil + o := makeStorageOpts(true, c.retry, c.userProject) + return c.client.tc.TestIamPermissions(ctx, resource, perms, o...) } func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go new file mode 100644 index 0000000000000..e33b5222ab387 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -0,0 +1,176 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +// Package storage is an auto-generated package for the +// Cloud Storage API. +// +// Lets you store and retrieve potentially-large, immutable data objects. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. 
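After this change the iamClient simply delegates to the transport client shown earlier, treating Set as idempotent only when the policy carries an etag. A sketch of the public IAM surface this serves (bucket name and member are placeholders).

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/iam"
	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	handle := client.Bucket("example-bucket").IAM()

	// The fetched policy carries the etag that makes the later SetPolicy
	// call retryable per the isIdempotent check above.
	policy, err := handle.Policy(ctx)
	if err != nil {
		log.Fatal(err)
	}
	policy.Add("user:alice@example.com", iam.RoleName("roles/storage.objectViewer"))

	if err := handle.SetPolicy(ctx, policy); err != nil {
		log.Fatal(err)
	}
}
```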
+// +// # General documentation +// +// For information about setting deadlines, reusing contexts, and more +// please visit https://pkg.go.dev/cloud.google.com/go. +// +// # Example usage +// +// To get started with this package, create a client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := storage.NewClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// The client will use your default application credentials. Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +// The returned client must be Closed when it is done being used. +// +// # Using the Client +// +// The following is an example of making an API call with the newly created client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := storage.NewClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &storagepb.DeleteBucketRequest{ +// // TODO: Fill request struct fields. +// // See https://pkg.go.dev/cloud.google.com/go/storage/internal/apiv2/stubs#DeleteBucketRequest. +// } +// err = c.DeleteBucket(ctx, req) +// if err != nil { +// // TODO: Handle error. +// } +// +// # Use of Context +// +// The ctx passed to NewClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +package storage // import "cloud.google.com/go/storage/internal/apiv2" + +import ( + "context" + "os" + "runtime" + "strconv" + "strings" + "unicode" + + "google.golang.org/api/option" + "google.golang.org/grpc/metadata" +) + +// For more information on implementing a client constructor hook, see +// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. +type clientHookParams struct{} +type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) + +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +func checkDisableDeadlines() (bool, error) { + raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") + if !ok { + return false, nil + } + + b, err := strconv.ParseBool(raw) + return b, err +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + } +} + +// versionGo returns the Go runtime version. The returned string +// has no whitespace, suitable for reporting in header. +func versionGo() string { + const develPrefix = "devel +" + + s := runtime.Version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json new file mode 100644 index 0000000000000..01103fa93bd14 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json @@ -0,0 +1,168 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", + "language": "go", + "protoPackage": "google.storage.v2", + "libraryPackage": "cloud.google.com/go/storage/internal/apiv2", + "services": { + "Storage": { + "clients": { + "grpc": { + "libraryClient": "Client", + "rpcs": { + "CancelResumableWrite": { + "methods": [ + "CancelResumableWrite" + ] + }, + "ComposeObject": { + "methods": [ + "ComposeObject" + ] + }, + "CreateBucket": { + "methods": [ + "CreateBucket" + ] + }, + "CreateHmacKey": { + "methods": [ + "CreateHmacKey" + ] + }, + "CreateNotification": { + "methods": [ + "CreateNotification" + ] + }, + "DeleteBucket": { + "methods": [ + "DeleteBucket" + ] + }, + "DeleteHmacKey": { + "methods": [ + "DeleteHmacKey" + ] + }, + "DeleteNotification": { + "methods": [ + "DeleteNotification" + ] + }, + "DeleteObject": { + "methods": [ + "DeleteObject" + ] + }, + "GetBucket": { + "methods": [ + "GetBucket" + ] + }, + "GetHmacKey": { + "methods": [ + "GetHmacKey" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetNotification": { + "methods": [ + "GetNotification" + ] + }, + "GetObject": { + "methods": [ + "GetObject" + ] + }, + "GetServiceAccount": { + "methods": [ + "GetServiceAccount" + ] + }, + "ListBuckets": { + "methods": [ + "ListBuckets" + ] + }, + "ListHmacKeys": { + "methods": [ + "ListHmacKeys" + ] + }, + "ListNotifications": { + "methods": [ + "ListNotifications" + ] + }, + "ListObjects": { + "methods": [ + "ListObjects" + ] + }, + "LockBucketRetentionPolicy": { + "methods": [ + "LockBucketRetentionPolicy" + ] + }, + "QueryWriteStatus": { + "methods": [ + "QueryWriteStatus" + ] + }, + "ReadObject": { + "methods": [ + "ReadObject" + ] + }, + "RewriteObject": { + "methods": [ + "RewriteObject" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "StartResumableWrite": { + "methods": [ + "StartResumableWrite" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateBucket": { + 
"methods": [ + "UpdateBucket" + ] + }, + "UpdateHmacKey": { + "methods": [ + "UpdateHmacKey" + ] + }, + "UpdateObject": { + "methods": [ + "UpdateObject" + ] + }, + "WriteObject": { + "methods": [ + "WriteObject" + ] + } + } + } + } + } + } +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go b/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go new file mode 100644 index 0000000000000..6ff86c4fb49f9 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go @@ -0,0 +1,26 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +// InsertMetadata inserts the given gRPC metadata into the outgoing context. +func InsertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + return insertMetadata(ctx, mds...) +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go new file mode 100644 index 0000000000000..aa2c1aff57271 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -0,0 +1,1615 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package storage + +import ( + "context" + "fmt" + "math" + "net/url" + "regexp" + "strings" + + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + iampb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" +) + +var newClientHook clientHook + +// CallOptions contains the retry settings for each method of Client. 
+type CallOptions struct { + DeleteBucket []gax.CallOption + GetBucket []gax.CallOption + CreateBucket []gax.CallOption + ListBuckets []gax.CallOption + LockBucketRetentionPolicy []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption + UpdateBucket []gax.CallOption + DeleteNotification []gax.CallOption + GetNotification []gax.CallOption + CreateNotification []gax.CallOption + ListNotifications []gax.CallOption + ComposeObject []gax.CallOption + DeleteObject []gax.CallOption + CancelResumableWrite []gax.CallOption + GetObject []gax.CallOption + ReadObject []gax.CallOption + UpdateObject []gax.CallOption + WriteObject []gax.CallOption + ListObjects []gax.CallOption + RewriteObject []gax.CallOption + StartResumableWrite []gax.CallOption + QueryWriteStatus []gax.CallOption + GetServiceAccount []gax.CallOption + CreateHmacKey []gax.CallOption + DeleteHmacKey []gax.CallOption + GetHmacKey []gax.CallOption + ListHmacKeys []gax.CallOption + UpdateHmacKey []gax.CallOption +} + +func defaultGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("storage.googleapis.com:443"), + internaloption.WithDefaultMTLSEndpoint("storage.mtls.googleapis.com:443"), + internaloption.WithDefaultAudience("https://storage.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultCallOptions() *CallOptions { + return &CallOptions{ + DeleteBucket: []gax.CallOption{}, + GetBucket: []gax.CallOption{}, + CreateBucket: []gax.CallOption{}, + ListBuckets: []gax.CallOption{}, + LockBucketRetentionPolicy: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + UpdateBucket: []gax.CallOption{}, + DeleteNotification: []gax.CallOption{}, + GetNotification: []gax.CallOption{}, + CreateNotification: []gax.CallOption{}, + ListNotifications: []gax.CallOption{}, + ComposeObject: []gax.CallOption{}, + DeleteObject: []gax.CallOption{}, + CancelResumableWrite: []gax.CallOption{}, + GetObject: []gax.CallOption{}, + ReadObject: []gax.CallOption{}, + UpdateObject: []gax.CallOption{}, + WriteObject: []gax.CallOption{}, + ListObjects: []gax.CallOption{}, + RewriteObject: []gax.CallOption{}, + StartResumableWrite: []gax.CallOption{}, + QueryWriteStatus: []gax.CallOption{}, + GetServiceAccount: []gax.CallOption{}, + CreateHmacKey: []gax.CallOption{}, + DeleteHmacKey: []gax.CallOption{}, + GetHmacKey: []gax.CallOption{}, + ListHmacKeys: []gax.CallOption{}, + UpdateHmacKey: []gax.CallOption{}, + } +} + +// internalClient is an interface that defines the methods available from Cloud Storage API. 
+type internalClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + DeleteBucket(context.Context, *storagepb.DeleteBucketRequest, ...gax.CallOption) error + GetBucket(context.Context, *storagepb.GetBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) + CreateBucket(context.Context, *storagepb.CreateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) + ListBuckets(context.Context, *storagepb.ListBucketsRequest, ...gax.CallOption) *BucketIterator + LockBucketRetentionPolicy(context.Context, *storagepb.LockBucketRetentionPolicyRequest, ...gax.CallOption) (*storagepb.Bucket, error) + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) + UpdateBucket(context.Context, *storagepb.UpdateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) + DeleteNotification(context.Context, *storagepb.DeleteNotificationRequest, ...gax.CallOption) error + GetNotification(context.Context, *storagepb.GetNotificationRequest, ...gax.CallOption) (*storagepb.Notification, error) + CreateNotification(context.Context, *storagepb.CreateNotificationRequest, ...gax.CallOption) (*storagepb.Notification, error) + ListNotifications(context.Context, *storagepb.ListNotificationsRequest, ...gax.CallOption) *NotificationIterator + ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error) + DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error + CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) + GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error) + ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) + UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error) + WriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) + ListObjects(context.Context, *storagepb.ListObjectsRequest, ...gax.CallOption) *ObjectIterator + RewriteObject(context.Context, *storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error) + StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) + QueryWriteStatus(context.Context, *storagepb.QueryWriteStatusRequest, ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) + GetServiceAccount(context.Context, *storagepb.GetServiceAccountRequest, ...gax.CallOption) (*storagepb.ServiceAccount, error) + CreateHmacKey(context.Context, *storagepb.CreateHmacKeyRequest, ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) + DeleteHmacKey(context.Context, *storagepb.DeleteHmacKeyRequest, ...gax.CallOption) error + GetHmacKey(context.Context, *storagepb.GetHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) + ListHmacKeys(context.Context, *storagepb.ListHmacKeysRequest, ...gax.CallOption) *HmacKeyMetadataIterator + UpdateHmacKey(context.Context, *storagepb.UpdateHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) +} + +// Client is a 
client for interacting with Cloud Storage API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// API Overview and Naming SyntaxThe Cloud Storage gRPC API allows applications to read and write data through +// the abstractions of buckets and objects. For a description of these +// abstractions please see https://cloud.google.com/storage/docs (at https://cloud.google.com/storage/docs). +// +// Resources are named as follows: +// +// Projects are referred to as they are defined by the Resource Manager API, +// using strings like projects/123456 or projects/my-string-id. +// +// Buckets are named using string names of the form: +// projects/{project}/buckets/{bucket} +// For globally unique buckets, _ may be substituted for the project. +// +// Objects are uniquely identified by their name along with the name of the +// bucket they belong to, as separate strings in this API. For example: +// +// ReadObjectRequest { +// bucket: ‘projects/_/buckets/my-bucket’ +// object: ‘my-object’ +// } +// Note that object names can contain / characters, which are treated as +// any other character (no special directory semantics). +type Client struct { + // The internal transport-dependent client. + internalClient internalClient + + // The call options for this service. + CallOptions *CallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *Client) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// DeleteBucket permanently deletes an empty bucket. +func (c *Client) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteBucket(ctx, req, opts...) +} + +// GetBucket returns metadata for the specified bucket. +func (c *Client) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + return c.internalClient.GetBucket(ctx, req, opts...) +} + +// CreateBucket creates a new bucket. +func (c *Client) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + return c.internalClient.CreateBucket(ctx, req, opts...) +} + +// ListBuckets retrieves a list of buckets for a given project. +func (c *Client) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator { + return c.internalClient.ListBuckets(ctx, req, opts...) +} + +// LockBucketRetentionPolicy locks retention policy on a bucket. +func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + return c.internalClient.LockBucketRetentionPolicy(ctx, req, opts...) +} + +// GetIamPolicy gets the IAM policy for a specified bucket or object. 
+// The resource field in the request should be
+// projects/_/buckets/<bucket_name> for a bucket or
+// projects/_/buckets/<bucket_name>/objects/<object_name> for an object.
+func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+	return c.internalClient.GetIamPolicy(ctx, req, opts...)
+}
+
+// SetIamPolicy updates an IAM policy for the specified bucket or object.
+// The resource field in the request should be
+// projects/_/buckets/<bucket_name> for a bucket or
+// projects/_/buckets/<bucket_name>/objects/<object_name> for an object.
+func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+	return c.internalClient.SetIamPolicy(ctx, req, opts...)
+}
+
+// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if
+// any, are held by the caller.
+// The resource field in the request should be
+// projects/_/buckets/<bucket_name> for a bucket or
+// projects/_/buckets/<bucket_name>/objects/<object_name> for an object.
+func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+	return c.internalClient.TestIamPermissions(ctx, req, opts...)
+}
+
+// UpdateBucket updates a bucket. Equivalent to JSON API’s storage.buckets.patch method.
+func (c *Client) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
+	return c.internalClient.UpdateBucket(ctx, req, opts...)
+}
+
+// DeleteNotification permanently deletes a notification subscription.
+func (c *Client) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error {
+	return c.internalClient.DeleteNotification(ctx, req, opts...)
+}
+
+// GetNotification view a notification config.
+func (c *Client) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) {
+	return c.internalClient.GetNotification(ctx, req, opts...)
+}
+
+// CreateNotification creates a notification subscription for a given bucket.
+// These notifications, when triggered, publish messages to the specified
+// Pub/Sub topics.
+// See https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications).
+func (c *Client) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) {
+	return c.internalClient.CreateNotification(ctx, req, opts...)
+}
+
+// ListNotifications retrieves a list of notification subscriptions for a given bucket.
+func (c *Client) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator {
+	return c.internalClient.ListNotifications(ctx, req, opts...)
+}
+
+// ComposeObject concatenates a list of existing objects into a new object in the same
+// bucket.
+func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
+	return c.internalClient.ComposeObject(ctx, req, opts...)
+}
+
+// DeleteObject deletes an object and its metadata. Deletions are permanent if versioning
+// is not enabled for the bucket, or if the generation parameter is used.
+func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error {
+	return c.internalClient.DeleteObject(ctx, req, opts...)
+} + +// CancelResumableWrite cancels an in-progress resumable upload. +func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { + return c.internalClient.CancelResumableWrite(ctx, req, opts...) +} + +// GetObject retrieves an object’s metadata. +func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + return c.internalClient.GetObject(ctx, req, opts...) +} + +// ReadObject reads an object’s data. +func (c *Client) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) { + return c.internalClient.ReadObject(ctx, req, opts...) +} + +// UpdateObject updates an object’s metadata. +// Equivalent to JSON API’s storage.objects.patch. +func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + return c.internalClient.UpdateObject(ctx, req, opts...) +} + +// WriteObject stores a new object and metadata. +// +// An object can be written either in a single message stream or in a +// resumable sequence of message streams. To write using a single stream, +// the client should include in the first message of the stream an +// WriteObjectSpec describing the destination bucket, object, and any +// preconditions. Additionally, the final message must set ‘finish_write’ to +// true, or else it is an error. +// +// For a resumable write, the client should instead call +// StartResumableWrite(), populating a WriteObjectSpec into that request. +// They should then attach the returned upload_id to the first message of +// each following call to WriteObject. If the stream is closed before +// finishing the upload (either explicitly by the client or due to a network +// error or an error response from the server), the client should do as +// follows: +// +// Check the result Status of the stream, to determine if writing can be +// resumed on this stream or must be restarted from scratch (by calling +// StartResumableWrite()). The resumable errors are DEADLINE_EXCEEDED, +// INTERNAL, and UNAVAILABLE. For each case, the client should use binary +// exponential backoff before retrying. Additionally, writes can be +// resumed after RESOURCE_EXHAUSTED errors, but only after taking +// appropriate measures, which may include reducing aggregate send rate +// across clients and/or requesting a quota increase for your project. +// +// If the call to WriteObject returns ABORTED, that indicates +// concurrent attempts to update the resumable write, caused either by +// multiple racing clients or by a single client where the previous +// request was timed out on the client side but nonetheless reached the +// server. In this case the client should take steps to prevent further +// concurrent writes (e.g., increase the timeouts, stop using more than +// one process to perform the upload, etc.), and then should follow the +// steps below for resuming the upload. +// +// For resumable errors, the client should call QueryWriteStatus() and +// then continue writing from the returned persisted_size. This may be +// less than the amount of data the client previously sent. 
Note also that +// it is acceptable to send data starting at an offset earlier than the +// returned persisted_size; in this case, the service will skip data at +// offsets that were already persisted (without checking that it matches +// the previously written data), and write only the data starting from the +// persisted offset. This behavior can make client-side handling simpler +// in some cases. +// +// The service will not view the object as complete until the client has +// sent a WriteObjectRequest with finish_write set to true. Sending any +// requests on a stream after sending a request with finish_write set to +// true will cause an error. The client should check the response it +// receives to determine how much data the service was able to commit and +// whether the service views the object as complete. +// +// Attempting to resume an already finalized object will result in an OK +// status, with a WriteObjectResponse containing the finalized object’s +// metadata. +func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { + return c.internalClient.WriteObject(ctx, opts...) +} + +// ListObjects retrieves a list of objects matching the criteria. +func (c *Client) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { + return c.internalClient.ListObjects(ctx, req, opts...) +} + +// RewriteObject rewrites a source object to a destination object. Optionally overrides +// metadata. +func (c *Client) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) { + return c.internalClient.RewriteObject(ctx, req, opts...) +} + +// StartResumableWrite starts a resumable write. How long the write operation remains valid, and +// what happens when the write operation becomes invalid, are +// service-dependent. +func (c *Client) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) { + return c.internalClient.StartResumableWrite(ctx, req, opts...) +} + +// QueryWriteStatus determines the persisted_size for an object that is being written, which +// can then be used as the write_offset for the next Write() call. +// +// If the object does not exist (i.e., the object has been deleted, or the +// first Write() has not yet reached the service), this method returns the +// error NOT_FOUND. +// +// The client may call QueryWriteStatus() at any time to determine how +// much data has been processed for this object. This is useful if the +// client is buffering data and needs to know which data can be safely +// evicted. For any sequence of QueryWriteStatus() calls for a given +// object name, the sequence of returned persisted_size values will be +// non-decreasing. +func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) { + return c.internalClient.QueryWriteStatus(ctx, req, opts...) +} + +// GetServiceAccount retrieves the name of a project’s Google Cloud Storage service account. +func (c *Client) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) { + return c.internalClient.GetServiceAccount(ctx, req, opts...) +} + +// CreateHmacKey creates a new HMAC key for the given service account. 
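+//
+// The resumable flow described above for WriteObject, StartResumableWrite, and
+// QueryWriteStatus can be sketched as follows (illustrative only; data chunking
+// and the WriteObject stream itself are elided, and names are placeholders):
+//
+//	start, err := c.StartResumableWrite(ctx, &storagepb.StartResumableWriteRequest{
+//		WriteObjectSpec: &storagepb.WriteObjectSpec{
+//			Resource: &storagepb.Object{
+//				Bucket: "projects/_/buckets/my-bucket",
+//				Name:   "my-object",
+//			},
+//		},
+//	})
+//	if err != nil {
+//		// TODO: handle error.
+//	}
+//	uploadID := start.GetUploadId()
+//
+//	// ... open a WriteObject stream and send WriteObjectRequest messages that
+//	// carry uploadID, the write offset, and the data; set finish_write on the
+//	// final message ...
+//
+//	// After an interruption, ask the service how much it has persisted and
+//	// resume writing from that offset.
+//	status, err := c.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{
+//		UploadId: uploadID,
+//	})
+//	if err != nil {
+//		// TODO: handle error.
+//	}
+//	resumeFrom := status.GetPersistedSize()
+//	_ = resumeFrom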
+func (c *Client) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) { + return c.internalClient.CreateHmacKey(ctx, req, opts...) +} + +// DeleteHmacKey deletes a given HMAC key. Key must be in an INACTIVE state. +func (c *Client) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteHmacKey(ctx, req, opts...) +} + +// GetHmacKey gets an existing HMAC key metadata for the given id. +func (c *Client) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { + return c.internalClient.GetHmacKey(ctx, req, opts...) +} + +// ListHmacKeys lists HMAC keys under a given project with the additional filters provided. +func (c *Client) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { + return c.internalClient.ListHmacKeys(ctx, req, opts...) +} + +// UpdateHmacKey updates a given HMAC key state between ACTIVE and INACTIVE. +func (c *Client) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { + return c.internalClient.UpdateHmacKey(ctx, req, opts...) +} + +// gRPCClient is a client for interacting with Cloud Storage API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type gRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE + disableDeadlines bool + + // Points back to the CallOptions field of the containing Client + CallOptions **CallOptions + + // The gRPC API client. + client storagepb.StorageClient + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new storage client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// API Overview and Naming SyntaxThe Cloud Storage gRPC API allows applications to read and write data through +// the abstractions of buckets and objects. For a description of these +// abstractions please see https://cloud.google.com/storage/docs (at https://cloud.google.com/storage/docs). +// +// Resources are named as follows: +// +// Projects are referred to as they are defined by the Resource Manager API, +// using strings like projects/123456 or projects/my-string-id. +// +// Buckets are named using string names of the form: +// projects/{project}/buckets/{bucket} +// For globally unique buckets, _ may be substituted for the project. +// +// Objects are uniquely identified by their name along with the name of the +// bucket they belong to, as separate strings in this API. For example: +// +// ReadObjectRequest { +// bucket: ‘projects/_/buckets/my-bucket’ +// object: ‘my-object’ +// } +// Note that object names can contain / characters, which are treated as +// any other character (no special directory semantics). 
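+//
+// Basic usage (illustrative sketch; assumes the package is imported under the
+// name storage, default credentials are available, and the bucket name is a
+// placeholder):
+//
+//	ctx := context.Background()
+//	c, err := storage.NewClient(ctx)
+//	if err != nil {
+//		// TODO: handle error.
+//	}
+//	defer c.Close()
+//
+//	bucket, err := c.GetBucket(ctx, &storagepb.GetBucketRequest{
+//		Name: "projects/_/buckets/my-bucket",
+//	})
+//	if err != nil {
+//		// TODO: handle error.
+//	}
+//	_ = bucket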
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + clientOpts := defaultGRPCClientOptions() + if newClientHook != nil { + hookOpts, err := newClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + disableDeadlines, err := checkDisableDeadlines() + if err != nil { + return nil, err + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := Client{CallOptions: defaultCallOptions()} + + c := &gRPCClient{ + connPool: connPool, + disableDeadlines: disableDeadlines, + client: storagepb.NewStorageClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *gRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *gRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *gRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteBucket(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ return err +} + +func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetBucket[0:len((*c.CallOptions).GetBucket):len((*c.CallOptions).GetBucket)], opts...) + var resp *storagepb.Bucket + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetBucket(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CreateBucket[0:len((*c.CallOptions).CreateBucket):len((*c.CallOptions).CreateBucket)], opts...) + var resp *storagepb.Bucket + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateBucket(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListBuckets[0:len((*c.CallOptions).ListBuckets):len((*c.CallOptions).ListBuckets)], opts...) 
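+	// The iterator below fetches pages lazily: each InternalFetch call issues
+	// one ListBuckets RPC for a single page, and PageInfo threads the page
+	// token between calls.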
+ it := &BucketIterator{} + req = proto.Clone(req).(*storagepb.ListBucketsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Bucket, string, error) { + resp := &storagepb.ListBucketsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListBuckets(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetBuckets(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).LockBucketRetentionPolicy[0:len((*c.CallOptions).LockBucketRetentionPolicy):len((*c.CallOptions).LockBucketRetentionPolicy)], opts...) + var resp *storagepb.Bucket + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.LockBucketRetentionPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) 
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
+	}
+	if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
+	var resp *iampb.Policy
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.GetIamPolicy(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
+	}
+	if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
+	var resp *iampb.Policy
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.SetIamPolicy(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
+	}
+	if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
+	var resp *iampb.TestIamPermissionsResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.TestIamPermissions(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket().GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append((*c.CallOptions).UpdateBucket[0:len((*c.CallOptions).UpdateBucket):len((*c.CallOptions).UpdateBucket)], opts...)
+	var resp *storagepb.Bucket
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.UpdateBucket(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).DeleteNotification[0:len((*c.CallOptions).DeleteNotification):len((*c.CallOptions).DeleteNotification)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteNotification(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetNotification[0:len((*c.CallOptions).GetNotification):len((*c.CallOptions).GetNotification)], opts...) + var resp *storagepb.Notification + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetNotification(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CreateNotification[0:len((*c.CallOptions).CreateNotification):len((*c.CallOptions).CreateNotification)], opts...) 
+ var resp *storagepb.Notification + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateNotification(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListNotifications[0:len((*c.CallOptions).ListNotifications):len((*c.CallOptions).ListNotifications)], opts...) + it := &NotificationIterator{} + req = proto.Clone(req).(*storagepb.ListNotificationsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Notification, string, error) { + resp := &storagepb.ListNotificationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListNotifications(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetNotifications(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetDestination().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ComposeObject[0:len((*c.CallOptions).ComposeObject):len((*c.CallOptions).ComposeObject)], opts...) 
+ var resp *storagepb.Object + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ComposeObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteObject(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CancelResumableWrite[0:len((*c.CallOptions).CancelResumableWrite):len((*c.CallOptions).CancelResumableWrite)], opts...) + var resp *storagepb.CancelResumableWriteResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CancelResumableWrite(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetObject[0:len((*c.CallOptions).GetObject):len((*c.CallOptions).GetObject)], opts...) + var resp *storagepb.Object + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ReadObject[0:len((*c.CallOptions).ReadObject):len((*c.CallOptions).ReadObject)], opts...) + var resp storagepb.Storage_ReadObjectClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ReadObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetObject().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).UpdateObject[0:len((*c.CallOptions).UpdateObject):len((*c.CallOptions).UpdateObject)], opts...) 
+ var resp *storagepb.Object + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + var resp storagepb.Storage_WriteObjectClient + opts = append((*c.CallOptions).WriteObject[0:len((*c.CallOptions).WriteObject):len((*c.CallOptions).WriteObject)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.WriteObject(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListObjects[0:len((*c.CallOptions).ListObjects):len((*c.CallOptions).ListObjects)], opts...) + it := &ObjectIterator{} + req = proto.Clone(req).(*storagepb.ListObjectsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Object, string, error) { + resp := &storagepb.ListObjectsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListObjects(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetObjects(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetSourceBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])) > 0 { + routingHeadersMap["source_bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1]) + } + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetDestinationBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).RewriteObject[0:len((*c.CallOptions).RewriteObject):len((*c.CallOptions).RewriteObject)], opts...) + var resp *storagepb.RewriteResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.RewriteObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetWriteObjectSpec().GetResource().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).StartResumableWrite[0:len((*c.CallOptions).StartResumableWrite):len((*c.CallOptions).StartResumableWrite)], opts...) + var resp *storagepb.StartResumableWriteResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.StartResumableWrite(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).QueryWriteStatus[0:len((*c.CallOptions).QueryWriteStatus):len((*c.CallOptions).QueryWriteStatus)], opts...) + var resp *storagepb.QueryWriteStatusResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.QueryWriteStatus(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...) + var resp *storagepb.ServiceAccount + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetServiceAccount(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...) 
+ var resp *storagepb.CreateHmacKeyResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateHmacKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteHmacKey(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...) + var resp *storagepb.HmacKeyMetadata + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetHmacKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...) 
+ it := &HmacKeyMetadataIterator{} + req = proto.Clone(req).(*storagepb.ListHmacKeysRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.HmacKeyMetadata, string, error) { + resp := &storagepb.ListHmacKeysResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListHmacKeys(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetHmacKeys(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetHmacKey().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...) + var resp *storagepb.HmacKeyMetadata + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateHmacKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// BucketIterator manages a stream of *storagepb.Bucket. +type BucketIterator struct { + items []*storagepb.Bucket + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Bucket, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *BucketIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. 
Once Next returns Done, all subsequent calls will return Done. +func (it *BucketIterator) Next() (*storagepb.Bucket, error) { + var item *storagepb.Bucket + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *BucketIterator) bufLen() int { + return len(it.items) +} + +func (it *BucketIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata. +type HmacKeyMetadataIterator struct { + items []*storagepb.HmacKeyMetadata + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) { + var item *storagepb.HmacKeyMetadata + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *HmacKeyMetadataIterator) bufLen() int { + return len(it.items) +} + +func (it *HmacKeyMetadataIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationIterator manages a stream of *storagepb.Notification. +type NotificationIterator struct { + items []*storagepb.Notification + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Notification, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
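+//
+// A minimal usage sketch of the standard google.golang.org/api/iterator loop
+// (the client value and bucket name are hypothetical):
+//
+//	it := client.ListNotifications(ctx, &storagepb.ListNotificationsRequest{
+//		Parent: "projects/_/buckets/my-bucket",
+//	})
+//	for {
+//		n, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			return err // or handle the error as appropriate
+//		}
+//		_ = n // use the *storagepb.Notification
+//	}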
+func (it *NotificationIterator) Next() (*storagepb.Notification, error) { + var item *storagepb.Notification + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ObjectIterator manages a stream of *storagepb.Object. +type ObjectIterator struct { + items []*storagepb.Object + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Object, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ObjectIterator) Next() (*storagepb.Object, error) { + var item *storagepb.Object + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ObjectIterator) bufLen() int { + return len(it.items) +} + +func (it *ObjectIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go new file mode 100644 index 0000000000000..f81e216c575ef --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go @@ -0,0 +1,10746 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.9 +// source: google/storage/v2/storage.proto + +package storage + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + v1 "google.golang.org/genproto/googleapis/iam/v1" + date "google.golang.org/genproto/googleapis/type/date" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A collection of constant values meaningful to the Storage API. +type ServiceConstants_Values int32 + +const ( + // Unused. Proto3 requires first enum to be 0. + ServiceConstants_VALUES_UNSPECIFIED ServiceConstants_Values = 0 + // The maximum size chunk that can will be returned in a single + // ReadRequest. + // 2 MiB. + ServiceConstants_MAX_READ_CHUNK_BYTES ServiceConstants_Values = 2097152 + // The maximum size chunk that can be sent in a single WriteObjectRequest. + // 2 MiB. + ServiceConstants_MAX_WRITE_CHUNK_BYTES ServiceConstants_Values = 2097152 + // The maximum size of an object in MB - whether written in a single stream + // or composed from multiple other objects. + // 5 TiB. + ServiceConstants_MAX_OBJECT_SIZE_MB ServiceConstants_Values = 5242880 + // The maximum length field name that can be sent in a single + // custom metadata field. + // 1 KiB. + ServiceConstants_MAX_CUSTOM_METADATA_FIELD_NAME_BYTES ServiceConstants_Values = 1024 + // The maximum length field value that can be sent in a single + // custom_metadata field. + // 4 KiB. + ServiceConstants_MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES ServiceConstants_Values = 4096 + // The maximum total bytes that can be populated into all field names and + // values of the custom_metadata for one object. + // 8 KiB. + ServiceConstants_MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES ServiceConstants_Values = 8192 + // The maximum total bytes that can be populated into all bucket metadata + // fields. + // 20 KiB. + ServiceConstants_MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES ServiceConstants_Values = 20480 + // The maximum number of NotificationConfigs that can be registered + // for a given bucket. + ServiceConstants_MAX_NOTIFICATION_CONFIGS_PER_BUCKET ServiceConstants_Values = 100 + // The maximum number of LifecycleRules that can be registered for a given + // bucket. + ServiceConstants_MAX_LIFECYCLE_RULES_PER_BUCKET ServiceConstants_Values = 100 + // The maximum number of custom attributes per NotificationConfigs. + ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTES ServiceConstants_Values = 5 + // The maximum length of a custom attribute key included in + // NotificationConfig. + ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH ServiceConstants_Values = 256 + // The maximum length of a custom attribute value included in a + // NotificationConfig. 
+ ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH ServiceConstants_Values = 1024 + // The maximum number of key/value entries per bucket label. + ServiceConstants_MAX_LABELS_ENTRIES_COUNT ServiceConstants_Values = 64 + // The maximum character length of the key or value in a bucket + // label map. + ServiceConstants_MAX_LABELS_KEY_VALUE_LENGTH ServiceConstants_Values = 63 + // The maximum byte size of the key or value in a bucket label + // map. + ServiceConstants_MAX_LABELS_KEY_VALUE_BYTES ServiceConstants_Values = 128 + // The maximum number of object IDs that can be included in a + // DeleteObjectsRequest. + ServiceConstants_MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST ServiceConstants_Values = 1000 + // The maximum number of days for which a token returned by the + // GetListObjectsSplitPoints RPC is valid. + ServiceConstants_SPLIT_TOKEN_MAX_VALID_DAYS ServiceConstants_Values = 14 +) + +// Enum value maps for ServiceConstants_Values. +var ( + ServiceConstants_Values_name = map[int32]string{ + 0: "VALUES_UNSPECIFIED", + 2097152: "MAX_READ_CHUNK_BYTES", + // Duplicate value: 2097152: "MAX_WRITE_CHUNK_BYTES", + 5242880: "MAX_OBJECT_SIZE_MB", + 1024: "MAX_CUSTOM_METADATA_FIELD_NAME_BYTES", + 4096: "MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES", + 8192: "MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES", + 20480: "MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES", + 100: "MAX_NOTIFICATION_CONFIGS_PER_BUCKET", + // Duplicate value: 100: "MAX_LIFECYCLE_RULES_PER_BUCKET", + 5: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTES", + 256: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH", + // Duplicate value: 1024: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH", + 64: "MAX_LABELS_ENTRIES_COUNT", + 63: "MAX_LABELS_KEY_VALUE_LENGTH", + 128: "MAX_LABELS_KEY_VALUE_BYTES", + 1000: "MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST", + 14: "SPLIT_TOKEN_MAX_VALID_DAYS", + } + ServiceConstants_Values_value = map[string]int32{ + "VALUES_UNSPECIFIED": 0, + "MAX_READ_CHUNK_BYTES": 2097152, + "MAX_WRITE_CHUNK_BYTES": 2097152, + "MAX_OBJECT_SIZE_MB": 5242880, + "MAX_CUSTOM_METADATA_FIELD_NAME_BYTES": 1024, + "MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES": 4096, + "MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES": 8192, + "MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES": 20480, + "MAX_NOTIFICATION_CONFIGS_PER_BUCKET": 100, + "MAX_LIFECYCLE_RULES_PER_BUCKET": 100, + "MAX_NOTIFICATION_CUSTOM_ATTRIBUTES": 5, + "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH": 256, + "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH": 1024, + "MAX_LABELS_ENTRIES_COUNT": 64, + "MAX_LABELS_KEY_VALUE_LENGTH": 63, + "MAX_LABELS_KEY_VALUE_BYTES": 128, + "MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST": 1000, + "SPLIT_TOKEN_MAX_VALID_DAYS": 14, + } +) + +func (x ServiceConstants_Values) Enum() *ServiceConstants_Values { + p := new(ServiceConstants_Values) + *p = x + return p +} + +func (x ServiceConstants_Values) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServiceConstants_Values) Descriptor() protoreflect.EnumDescriptor { + return file_google_storage_v2_storage_proto_enumTypes[0].Descriptor() +} + +func (ServiceConstants_Values) Type() protoreflect.EnumType { + return &file_google_storage_v2_storage_proto_enumTypes[0] +} + +func (x ServiceConstants_Values) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServiceConstants_Values.Descriptor instead. 
+func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39, 0} +} + +// Request message for DeleteBucket. +type DeleteBucketRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of a bucket to delete. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If set, only deletes the bucket if its metageneration matches this value. + IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // If set, only deletes the bucket if its metageneration does not match this + // value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` +} + +func (x *DeleteBucketRequest) Reset() { + *x = DeleteBucketRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteBucketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteBucketRequest) ProtoMessage() {} + +func (x *DeleteBucketRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteBucketRequest.ProtoReflect.Descriptor instead. +func (*DeleteBucketRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{0} +} + +func (x *DeleteBucketRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DeleteBucketRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *DeleteBucketRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +// Request message for GetBucket. +type GetBucketRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of a bucket. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If set, and if the bucket's current metageneration does not match the + // specified value, the request will return an error. + IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // If set, and if the bucket's current metageneration matches the specified + // value, the request will return an error. + IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // Mask specifying which fields to read. + // A "*" field may be used to indicate all fields. + // If no mask is specified, will default to all fields. 
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` +} + +func (x *GetBucketRequest) Reset() { + *x = GetBucketRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBucketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBucketRequest) ProtoMessage() {} + +func (x *GetBucketRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBucketRequest.ProtoReflect.Descriptor instead. +func (*GetBucketRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{1} +} + +func (x *GetBucketRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *GetBucketRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *GetBucketRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *GetBucketRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +// Request message for CreateBucket. +type CreateBucketRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The project to which this bucket will belong. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Properties of the new bucket being inserted. + // The project and name of the bucket are specified in the parent and + // bucket_id fields, respectively. Populating those fields in `bucket` will + // result in an error. + Bucket *Bucket `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. The ID to use for this bucket, which will become the final + // component of the bucket's resource name. For example, the value `foo` might + // result in a bucket with the name `projects/123456/buckets/foo`. + BucketId string `protobuf:"bytes,3,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` + // Apply a predefined set of access controls to this bucket. + // Valid values are "authenticatedRead", "private", "projectPrivate", + // "publicRead", or "publicReadWrite". + PredefinedAcl string `protobuf:"bytes,6,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` + // Apply a predefined set of default object access controls to this bucket. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". 
+ PredefinedDefaultObjectAcl string `protobuf:"bytes,7,opt,name=predefined_default_object_acl,json=predefinedDefaultObjectAcl,proto3" json:"predefined_default_object_acl,omitempty"` +} + +func (x *CreateBucketRequest) Reset() { + *x = CreateBucketRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateBucketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateBucketRequest) ProtoMessage() {} + +func (x *CreateBucketRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateBucketRequest.ProtoReflect.Descriptor instead. +func (*CreateBucketRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{2} +} + +func (x *CreateBucketRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateBucketRequest) GetBucket() *Bucket { + if x != nil { + return x.Bucket + } + return nil +} + +func (x *CreateBucketRequest) GetBucketId() string { + if x != nil { + return x.BucketId + } + return "" +} + +func (x *CreateBucketRequest) GetPredefinedAcl() string { + if x != nil { + return x.PredefinedAcl + } + return "" +} + +func (x *CreateBucketRequest) GetPredefinedDefaultObjectAcl() string { + if x != nil { + return x.PredefinedDefaultObjectAcl + } + return "" +} + +// Request message for ListBuckets. +type ListBucketsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The project whose buckets we are listing. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Maximum number of buckets to return in a single response. The service will + // use this parameter or 1,000 items, whichever is smaller. If "acl" is + // present in the read_mask, the service will use this parameter of 200 items, + // whichever is smaller. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A previously-returned page token representing part of the larger set of + // results to view. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // Filter results to buckets whose names begin with this prefix. + Prefix string `protobuf:"bytes,4,opt,name=prefix,proto3" json:"prefix,omitempty"` + // Mask specifying which fields to read from each result. + // If no mask is specified, will default to all fields except items.owner, + // items.acl, and items.default_object_acl. + // * may be used to mean "all fields". 
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` +} + +func (x *ListBucketsRequest) Reset() { + *x = ListBucketsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListBucketsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBucketsRequest) ProtoMessage() {} + +func (x *ListBucketsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBucketsRequest.ProtoReflect.Descriptor instead. +func (*ListBucketsRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{3} +} + +func (x *ListBucketsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListBucketsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListBucketsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListBucketsRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *ListBucketsRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +// The result of a call to Buckets.ListBuckets +type ListBucketsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of items. + Buckets []*Bucket `protobuf:"bytes,1,rep,name=buckets,proto3" json:"buckets,omitempty"` + // The continuation token, used to page through large result sets. Provide + // this value in a subsequent request to return the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListBucketsResponse) Reset() { + *x = ListBucketsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListBucketsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBucketsResponse) ProtoMessage() {} + +func (x *ListBucketsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBucketsResponse.ProtoReflect.Descriptor instead. +func (*ListBucketsResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{4} +} + +func (x *ListBucketsResponse) GetBuckets() []*Bucket { + if x != nil { + return x.Buckets + } + return nil +} + +func (x *ListBucketsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request message for LockBucketRetentionPolicyRequest. 
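+//
+// A minimal construction sketch (the bucket name is hypothetical; the
+// metageneration must equal the bucket's current value for the lock to apply):
+//
+//	req := &storagepb.LockBucketRetentionPolicyRequest{
+//		Bucket:                "projects/_/buckets/my-bucket",
+//		IfMetagenerationMatch: 1,
+//	}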
+type LockBucketRetentionPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of a bucket. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. Makes the operation conditional on whether bucket's current + // metageneration matches the given value. Must be positive. + IfMetagenerationMatch int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3" json:"if_metageneration_match,omitempty"` +} + +func (x *LockBucketRetentionPolicyRequest) Reset() { + *x = LockBucketRetentionPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LockBucketRetentionPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LockBucketRetentionPolicyRequest) ProtoMessage() {} + +func (x *LockBucketRetentionPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LockBucketRetentionPolicyRequest.ProtoReflect.Descriptor instead. +func (*LockBucketRetentionPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{5} +} + +func (x *LockBucketRetentionPolicyRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *LockBucketRetentionPolicyRequest) GetIfMetagenerationMatch() int64 { + if x != nil { + return x.IfMetagenerationMatch + } + return 0 +} + +// Request for UpdateBucket method. +type UpdateBucketRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The bucket to update. + // The bucket's `name` field will be used to identify the bucket. + Bucket *Bucket `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // If set, will only modify the bucket if its metageneration matches this + // value. + IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // If set, will only modify the bucket if its metageneration does not match + // this value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // Apply a predefined set of access controls to this bucket. + // Valid values are "authenticatedRead", "private", "projectPrivate", + // "publicRead", or "publicReadWrite". + PredefinedAcl string `protobuf:"bytes,8,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` + // Apply a predefined set of default object access controls to this bucket. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + PredefinedDefaultObjectAcl string `protobuf:"bytes,9,opt,name=predefined_default_object_acl,json=predefinedDefaultObjectAcl,proto3" json:"predefined_default_object_acl,omitempty"` + // Required. List of fields to be updated. 
+ // + // To specify ALL fields, equivalent to the JSON API's "update" function, + // specify a single field with the value `*`. Note: not recommended. If a new + // field is introduced at a later time, an older client updating with the `*` + // may accidentally reset the new field's value. + // + // Not specifying any fields is an error. + // Not specifying a field while setting that field to a non-default value is + // an error. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateBucketRequest) Reset() { + *x = UpdateBucketRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateBucketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateBucketRequest) ProtoMessage() {} + +func (x *UpdateBucketRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateBucketRequest.ProtoReflect.Descriptor instead. +func (*UpdateBucketRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{6} +} + +func (x *UpdateBucketRequest) GetBucket() *Bucket { + if x != nil { + return x.Bucket + } + return nil +} + +func (x *UpdateBucketRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *UpdateBucketRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *UpdateBucketRequest) GetPredefinedAcl() string { + if x != nil { + return x.PredefinedAcl + } + return "" +} + +func (x *UpdateBucketRequest) GetPredefinedDefaultObjectAcl() string { + if x != nil { + return x.PredefinedDefaultObjectAcl + } + return "" +} + +func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request message for DeleteNotification. +type DeleteNotificationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The parent bucket of the notification. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteNotificationRequest) Reset() { + *x = DeleteNotificationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteNotificationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteNotificationRequest) ProtoMessage() {} + +func (x *DeleteNotificationRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteNotificationRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteNotificationRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7} +} + +func (x *DeleteNotificationRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for GetNotification. +type GetNotificationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The parent bucket of the notification. + // Format: + // `projects/{project}/buckets/{bucket}/notificationConfigs/{notification}` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetNotificationRequest) Reset() { + *x = GetNotificationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationRequest) ProtoMessage() {} + +func (x *GetNotificationRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationRequest.ProtoReflect.Descriptor instead. +func (*GetNotificationRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8} +} + +func (x *GetNotificationRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for CreateNotification. +type CreateNotificationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The bucket to which this notification belongs. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. Properties of the notification to be inserted. + Notification *Notification `protobuf:"bytes,2,opt,name=notification,proto3" json:"notification,omitempty"` +} + +func (x *CreateNotificationRequest) Reset() { + *x = CreateNotificationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateNotificationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateNotificationRequest) ProtoMessage() {} + +func (x *CreateNotificationRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateNotificationRequest.ProtoReflect.Descriptor instead. +func (*CreateNotificationRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9} +} + +func (x *CreateNotificationRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateNotificationRequest) GetNotification() *Notification { + if x != nil { + return x.Notification + } + return nil +} + +// Request message for ListNotifications. 
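+//
+// A minimal construction sketch (the bucket name is hypothetical). Per the
+// field comments below, page_size defaults to 100 and values above 100 are
+// treated as 100:
+//
+//	req := &storagepb.ListNotificationsRequest{
+//		Parent:    "projects/_/buckets/my-bucket",
+//		PageSize:  50,
+//		PageToken: "", // empty for the first page
+//	}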
+type ListNotificationsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of a Google Cloud Storage bucket. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // The maximum number of notifications to return. The service may return fewer + // than this value. + // The default value is 100. Specifying a value above 100 will result in a + // page_size of 100. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A page token, received from a previous `ListNotifications` call. + // Provide this to retrieve the subsequent page. + // + // When paginating, all other parameters provided to `ListNotifications` must + // match the call that provided the page token. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListNotificationsRequest) Reset() { + *x = ListNotificationsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationsRequest) ProtoMessage() {} + +func (x *ListNotificationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationsRequest.ProtoReflect.Descriptor instead. +func (*ListNotificationsRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10} +} + +func (x *ListNotificationsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListNotificationsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListNotificationsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The result of a call to Notifications.ListNotifications +type ListNotificationsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of items. + Notifications []*Notification `protobuf:"bytes,1,rep,name=notifications,proto3" json:"notifications,omitempty"` + // A token, which can be sent as `page_token` to retrieve the next page. + // If this field is omitted, there are no subsequent pages. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListNotificationsResponse) Reset() { + *x = ListNotificationsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationsResponse) ProtoMessage() {} + +func (x *ListNotificationsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationsResponse.ProtoReflect.Descriptor instead. +func (*ListNotificationsResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11} +} + +func (x *ListNotificationsResponse) GetNotifications() []*Notification { + if x != nil { + return x.Notifications + } + return nil +} + +func (x *ListNotificationsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request message for ComposeObject. +type ComposeObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Properties of the resulting object. + Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` + // The list of source objects that will be concatenated into a single object. + SourceObjects []*ComposeObjectRequest_SourceObject `protobuf:"bytes,2,rep,name=source_objects,json=sourceObjects,proto3" json:"source_objects,omitempty"` + // Apply a predefined set of access controls to the destination object. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + DestinationPredefinedAcl string `protobuf:"bytes,9,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Resource name of the Cloud KMS key, of the form + // `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`, + // that will be used to encrypt the object. Overrides the object + // metadata's `kms_key_name` value, if any. + KmsKey string `protobuf:"bytes,6,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` + // A set of parameters common to Storage API requests concerning an object. 
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,7,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` + // The checksums of the complete object. This will be validated against the + // combined checksums of the component objects. + ObjectChecksums *ObjectChecksums `protobuf:"bytes,10,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` +} + +func (x *ComposeObjectRequest) Reset() { + *x = ComposeObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComposeObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComposeObjectRequest) ProtoMessage() {} + +func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComposeObjectRequest.ProtoReflect.Descriptor instead. +func (*ComposeObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12} +} + +func (x *ComposeObjectRequest) GetDestination() *Object { + if x != nil { + return x.Destination + } + return nil +} + +func (x *ComposeObjectRequest) GetSourceObjects() []*ComposeObjectRequest_SourceObject { + if x != nil { + return x.SourceObjects + } + return nil +} + +func (x *ComposeObjectRequest) GetDestinationPredefinedAcl() string { + if x != nil { + return x.DestinationPredefinedAcl + } + return "" +} + +func (x *ComposeObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *ComposeObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *ComposeObjectRequest) GetKmsKey() string { + if x != nil { + return x.KmsKey + } + return "" +} + +func (x *ComposeObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +func (x *ComposeObjectRequest) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +// Message for deleting an object. +// `bucket` and `object` **must** be set. +type DeleteObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which the object resides. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. The name of the finalized object to delete. + // Note: If you want to delete an unfinalized resumable upload please use + // `CancelResumableWrite`. + Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + // If present, permanently deletes a specific revision of this object (as + // opposed to the latest version, the default). + Generation int64 `protobuf:"varint,4,opt,name=generation,proto3" json:"generation,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. 
Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,5,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *DeleteObjectRequest) Reset() { + *x = DeleteObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteObjectRequest) ProtoMessage() {} + +func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13} +} + +func (x *DeleteObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *DeleteObjectRequest) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *DeleteObjectRequest) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *DeleteObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *DeleteObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *DeleteObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *DeleteObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *DeleteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +// Message for canceling an in-progress resumable upload. +// `upload_id` **must** be set. +type CancelResumableWriteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The upload_id of the resumable upload to cancel. This should be + // copied from the `upload_id` field of `StartResumableWriteResponse`. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` +} + +func (x *CancelResumableWriteRequest) Reset() { + *x = CancelResumableWriteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CancelResumableWriteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelResumableWriteRequest) ProtoMessage() {} + +func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead. +func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} +} + +func (x *CancelResumableWriteRequest) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +// Empty response message for canceling an in-progress resumable upload, will be +// extended as needed. 
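+//
+// A minimal sketch of the cancel flow, assuming a client and an earlier
+// StartResumableWrite response are in scope (the variable names are
+// hypothetical):
+//
+//	_, err := client.CancelResumableWrite(ctx, &storagepb.CancelResumableWriteRequest{
+//		UploadId: startResp.GetUploadId(),
+//	})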
+type CancelResumableWriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CancelResumableWriteResponse) Reset() { + *x = CancelResumableWriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CancelResumableWriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelResumableWriteResponse) ProtoMessage() {} + +func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead. +func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} +} + +// Request message for ReadObject. +type ReadObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the bucket containing the object to read. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. The name of the object to read. + Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + // If present, selects a specific revision of this object (as opposed + // to the latest version, the default). + Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // The offset for the first byte to return in the read, relative to the start + // of the object. + // + // A negative `read_offset` value will be interpreted as the number of bytes + // back from the end of the object to be returned. For example, if an object's + // length is 15 bytes, a ReadObjectRequest with `read_offset` = -5 and + // `read_limit` = 3 would return bytes 10 through 12 of the object. Requesting + // a negative offset with magnitude larger than the size of the object will + // return the entire object. + ReadOffset int64 `protobuf:"varint,4,opt,name=read_offset,json=readOffset,proto3" json:"read_offset,omitempty"` + // The maximum number of `data` bytes the server is allowed to return in the + // sum of all `Object` messages. A `read_limit` of zero indicates that there + // is no limit, and a negative `read_limit` will cause an error. + // + // If the stream returns fewer bytes than allowed by the `read_limit` and no + // error occurred, the stream includes all data from the `read_offset` to the + // end of the resource. + ReadLimit int64 `protobuf:"varint,5,opt,name=read_limit,json=readLimit,proto3" json:"read_limit,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,6,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. 
Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` + // Mask specifying which fields to read. + // The checksummed_data field and its children will always be present. + // If no mask is specified, will default to all fields except metadata.owner + // and metadata.acl. + // * may be used to mean "all fields". + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` +} + +func (x *ReadObjectRequest) Reset() { + *x = ReadObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadObjectRequest) ProtoMessage() {} + +func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead. 
+func (*ReadObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} +} + +func (x *ReadObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *ReadObjectRequest) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *ReadObjectRequest) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *ReadObjectRequest) GetReadOffset() int64 { + if x != nil { + return x.ReadOffset + } + return 0 +} + +func (x *ReadObjectRequest) GetReadLimit() int64 { + if x != nil { + return x.ReadLimit + } + return 0 +} + +func (x *ReadObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *ReadObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *ReadObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *ReadObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *ReadObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +func (x *ReadObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +// Request message for GetObject. +type GetObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which the object resides. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. Name of the object. + Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + // If present, selects a specific revision of this object (as opposed to the + // latest version, the default). + Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. 
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` + // Mask specifying which fields to read. + // If no mask is specified, will default to all fields except metadata.acl and + // metadata.owner. + // * may be used to mean "all fields". + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` +} + +func (x *GetObjectRequest) Reset() { + *x = GetObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetObjectRequest) ProtoMessage() {} + +func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead. +func (*GetObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} +} + +func (x *GetObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *GetObjectRequest) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *GetObjectRequest) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *GetObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *GetObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *GetObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *GetObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *GetObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +// Response message for ReadObject. +type ReadObjectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A portion of the data for the object. The service **may** leave `data` + // empty for any given `ReadResponse`. This enables the service to inform the + // client that the request is still live while it is running an operation to + // generate more data. 
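As a quick illustration of the read_mask documented on GetObjectRequest above, here is a hedged sketch that asks for only a few metadata fields. The chosen field paths are assumptions for the example, and fieldmaskpb refers to google.golang.org/protobuf/types/known/fieldmaskpb, the same package this file already uses for its FieldMask fields.

// Sketch: request object metadata only, limited to a handful of fields via
// read_mask. The paths here ("name", "size", "generation") are illustrative;
// "*" would request all fields.
func metadataOnlyRequest() *GetObjectRequest {
	return &GetObjectRequest{
		Bucket: "projects/_/buckets/example-bucket", // assumed v2 resource-name form
		Object: "example-object.txt",
		ReadMask: &fieldmaskpb.FieldMask{
			Paths: []string{"name", "size", "generation"},
		},
	}
}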
+ ChecksummedData *ChecksummedData `protobuf:"bytes,1,opt,name=checksummed_data,json=checksummedData,proto3" json:"checksummed_data,omitempty"` + // The checksums of the complete object. The client should compute one of + // these checksums over the downloaded object and compare it against the value + // provided here. + ObjectChecksums *ObjectChecksums `protobuf:"bytes,2,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` + // If read_offset and or read_limit was specified on the + // ReadObjectRequest, ContentRange will be populated on the first + // ReadObjectResponse message of the read stream. + ContentRange *ContentRange `protobuf:"bytes,3,opt,name=content_range,json=contentRange,proto3" json:"content_range,omitempty"` + // Metadata of the object whose media is being returned. + // Only populated in the first response in the stream. + Metadata *Object `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *ReadObjectResponse) Reset() { + *x = ReadObjectResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadObjectResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadObjectResponse) ProtoMessage() {} + +func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead. +func (*ReadObjectResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} +} + +func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData { + if x != nil { + return x.ChecksummedData + } + return nil +} + +func (x *ReadObjectResponse) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +func (x *ReadObjectResponse) GetContentRange() *ContentRange { + if x != nil { + return x.ContentRange + } + return nil +} + +func (x *ReadObjectResponse) GetMetadata() *Object { + if x != nil { + return x.Metadata + } + return nil +} + +// Describes an attempt to insert an object, possibly over multiple requests. +type WriteObjectSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Destination object, including its name and its metadata. + Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // Apply a predefined set of access controls to this object. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + PredefinedAcl string `protobuf:"bytes,7,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` + // Makes the operation conditional on whether the object's current + // generation matches the given value. Setting to 0 makes the operation + // succeed only if there are no live versions of the object. 
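The ReadObjectResponse comments above imply a streaming read: metadata and content_range arrive only on the first message, and any message may carry an empty data chunk. A minimal consumption sketch follows; the recv callback stands in for whatever gRPC receive method the caller holds (an assumption, not the client library's actual API), and io.EOF is the standard library's stream-end sentinel.

// Sketch: drain a ReadObject stream via a generic recv function, returning
// the object metadata from the first response plus the concatenated data.
func drainReadObject(recv func() (*ReadObjectResponse, error)) (*Object, []byte, error) {
	var meta *Object
	var data []byte
	for {
		resp, err := recv()
		if err == io.EOF { // end of stream
			return meta, data, nil
		}
		if err != nil {
			return nil, nil, err
		}
		if meta == nil {
			meta = resp.GetMetadata() // only populated on the first response
		}
		// ChecksummedData may be empty on keep-alive style responses;
		// Content is the raw chunk bytes per the v2 ChecksummedData message.
		data = append(data, resp.GetChecksummedData().GetContent()...)
	}
}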
+ IfGenerationMatch *int64 `protobuf:"varint,3,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live + // generation does not match the given value. If no live object exists, the + // precondition fails. Setting to 0 makes the operation succeed only if + // there is a live version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,4,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // The expected final object size being uploaded. + // If this value is set, closing the stream after writing fewer or more than + // `object_size` bytes will result in an OUT_OF_RANGE error. + // + // This situation is considered a client error, and if such an error occurs + // you must start the upload over from scratch, this time sending the correct + // number of bytes. + // + // The `object_size` value is ignored for one-shot (non-resumable) writes. + ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"` +} + +func (x *WriteObjectSpec) Reset() { + *x = WriteObjectSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteObjectSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteObjectSpec) ProtoMessage() {} + +func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead. 
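Putting the WriteObjectSpec preconditions above together, here is a hedged sketch of a spec for a create-only upload of a known size. Names are placeholders; the if_generation_match = 0 trick comes straight from the field comment (succeed only if no live version exists).

// Sketch: a WriteObjectSpec that only succeeds if the object does not yet
// exist (if_generation_match = 0) and declares the exact final size, so the
// server rejects a stream closed early or late with OUT_OF_RANGE.
func createOnlySpec(size int64) *WriteObjectSpec {
	zero := int64(0)
	return &WriteObjectSpec{
		Resource: &Object{
			Bucket: "projects/_/buckets/example-bucket", // assumed v2 resource-name form
			Name:   "example-object.txt",
		},
		IfGenerationMatch: &zero,
		ObjectSize:        &size,
	}
}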
+func (*WriteObjectSpec) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} +} + +func (x *WriteObjectSpec) GetResource() *Object { + if x != nil { + return x.Resource + } + return nil +} + +func (x *WriteObjectSpec) GetPredefinedAcl() string { + if x != nil { + return x.PredefinedAcl + } + return "" +} + +func (x *WriteObjectSpec) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *WriteObjectSpec) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *WriteObjectSpec) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *WriteObjectSpec) GetObjectSize() int64 { + if x != nil && x.ObjectSize != nil { + return *x.ObjectSize + } + return 0 +} + +// Request message for WriteObject. +type WriteObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The first message of each stream should set one of the following. + // + // Types that are assignable to FirstMessage: + // + // *WriteObjectRequest_UploadId + // *WriteObjectRequest_WriteObjectSpec + FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` + // Required. The offset from the beginning of the object at which the data + // should be written. + // + // In the first `WriteObjectRequest` of a `WriteObject()` action, it + // indicates the initial offset for the `Write()` call. The value **must** be + // equal to the `persisted_size` that a call to `QueryWriteStatus()` would + // return (0 if this is the first write to the object). + // + // On subsequent calls, this value **must** be no larger than the sum of the + // first `write_offset` and the sizes of all `data` chunks sent previously on + // this stream. + // + // An incorrect value will cause an error. + WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"` + // A portion of the data for the object. + // + // Types that are assignable to Data: + // + // *WriteObjectRequest_ChecksummedData + Data isWriteObjectRequest_Data `protobuf_oneof:"data"` + // Checksums for the complete object. If the checksums computed by the service + // don't match the specifified checksums the call will fail. May only be + // provided in the first or last request (either with first_message, or + // finish_write set). + ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` + // If `true`, this indicates that the write is complete. Sending any + // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` + // will cause an error. + // For a non-resumable write (where the upload_id was not set in the first + // message), it is an error not to set this field in the final message of the + // stream. + FinishWrite bool `protobuf:"varint,7,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"` + // A set of parameters common to Storage API requests concerning an object. 
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *WriteObjectRequest) Reset() { + *x = WriteObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteObjectRequest) ProtoMessage() {} + +func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead. +func (*WriteObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} +} + +func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage { + if m != nil { + return m.FirstMessage + } + return nil +} + +func (x *WriteObjectRequest) GetUploadId() string { + if x, ok := x.GetFirstMessage().(*WriteObjectRequest_UploadId); ok { + return x.UploadId + } + return "" +} + +func (x *WriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec { + if x, ok := x.GetFirstMessage().(*WriteObjectRequest_WriteObjectSpec); ok { + return x.WriteObjectSpec + } + return nil +} + +func (x *WriteObjectRequest) GetWriteOffset() int64 { + if x != nil { + return x.WriteOffset + } + return 0 +} + +func (m *WriteObjectRequest) GetData() isWriteObjectRequest_Data { + if m != nil { + return m.Data + } + return nil +} + +func (x *WriteObjectRequest) GetChecksummedData() *ChecksummedData { + if x, ok := x.GetData().(*WriteObjectRequest_ChecksummedData); ok { + return x.ChecksummedData + } + return nil +} + +func (x *WriteObjectRequest) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +func (x *WriteObjectRequest) GetFinishWrite() bool { + if x != nil { + return x.FinishWrite + } + return false +} + +func (x *WriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +type isWriteObjectRequest_FirstMessage interface { + isWriteObjectRequest_FirstMessage() +} + +type WriteObjectRequest_UploadId struct { + // For resumable uploads. This should be the `upload_id` returned from a + // call to `StartResumableWriteResponse`. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"` +} + +type WriteObjectRequest_WriteObjectSpec struct { + // For non-resumable uploads. Describes the overall upload, including the + // destination bucket and object name, preconditions, etc. + WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"` +} + +func (*WriteObjectRequest_UploadId) isWriteObjectRequest_FirstMessage() {} + +func (*WriteObjectRequest_WriteObjectSpec) isWriteObjectRequest_FirstMessage() {} + +type isWriteObjectRequest_Data interface { + isWriteObjectRequest_Data() +} + +type WriteObjectRequest_ChecksummedData struct { + // The data to insert. 
If a crc32c checksum is provided that doesn't match + // the checksum computed by the service, the request will fail. + ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"` +} + +func (*WriteObjectRequest_ChecksummedData) isWriteObjectRequest_Data() {} + +// Response message for WriteObject. +type WriteObjectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response will set one of the following. + // + // Types that are assignable to WriteStatus: + // + // *WriteObjectResponse_PersistedSize + // *WriteObjectResponse_Resource + WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` +} + +func (x *WriteObjectResponse) Reset() { + *x = WriteObjectResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteObjectResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteObjectResponse) ProtoMessage() {} + +func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead. +func (*WriteObjectResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} +} + +func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus { + if m != nil { + return m.WriteStatus + } + return nil +} + +func (x *WriteObjectResponse) GetPersistedSize() int64 { + if x, ok := x.GetWriteStatus().(*WriteObjectResponse_PersistedSize); ok { + return x.PersistedSize + } + return 0 +} + +func (x *WriteObjectResponse) GetResource() *Object { + if x, ok := x.GetWriteStatus().(*WriteObjectResponse_Resource); ok { + return x.Resource + } + return nil +} + +type isWriteObjectResponse_WriteStatus interface { + isWriteObjectResponse_WriteStatus() +} + +type WriteObjectResponse_PersistedSize struct { + // The total number of bytes that have been processed for the given object + // from all `WriteObject` calls. Only set if the upload has not finalized. + PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` +} + +type WriteObjectResponse_Resource struct { + // A resource containing the metadata for the uploaded object. Only set if + // the upload has finalized. + Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` +} + +func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {} + +func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {} + +// Request message for ListObjects. +type ListObjectsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which to look for objects. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Maximum number of `items` plus `prefixes` to return + // in a single page of responses. As duplicate `prefixes` are + // omitted, fewer total results may be returned than requested. 
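Before moving on to listing, a hedged sketch of the one-shot (non-resumable) write described by the WriteObjectRequest comments above: the first and only message carries the write_object_spec, a data chunk at offset 0, and finish_write. The helper name and values are assumptions for illustration.

// Sketch: a single WriteObjectRequest for a small non-resumable upload.
// first_message carries the spec, the chunk goes in the checksummed_data
// oneof, and finish_write must be true on the final (here, only) message.
func oneShotWrite(spec *WriteObjectSpec, data []byte) *WriteObjectRequest {
	return &WriteObjectRequest{
		FirstMessage: &WriteObjectRequest_WriteObjectSpec{WriteObjectSpec: spec},
		WriteOffset:  0, // must equal persisted_size; 0 for the first write
		Data: &WriteObjectRequest_ChecksummedData{
			// Content holds the raw bytes per the v2 ChecksummedData message.
			ChecksummedData: &ChecksummedData{Content: data},
		},
		FinishWrite: true,
	}
}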
The service + // will use this parameter or 1,000 items, whichever is smaller. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A previously-returned page token representing part of the larger set of + // results to view. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // If set, returns results in a directory-like mode. `items` will contain + // only objects whose names, aside from the `prefix`, do not + // contain `delimiter`. Objects whose names, aside from the + // `prefix`, contain `delimiter` will have their name, + // truncated after the `delimiter`, returned in + // `prefixes`. Duplicate `prefixes` are omitted. + Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"` + // If true, objects that end in exactly one instance of `delimiter` + // will have their metadata included in `items` in addition to + // `prefixes`. + IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"` + // Filter results to objects whose names begin with this prefix. + Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"` + // If `true`, lists all versions of an object as distinct results. + // For more information, see + // [Object + // Versioning](https://cloud.google.com/storage/docs/object-versioning). + Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"` + // Mask specifying which fields to read from each result. + // If no mask is specified, will default to all fields except items.acl and + // items.owner. + // * may be used to mean "all fields". + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + // Optional. Filter results to objects whose names are lexicographically equal + // to or after lexicographic_start. If lexicographic_end is also set, the + // objects listed have names between lexicographic_start (inclusive) and + // lexicographic_end (exclusive). + LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"` + // Optional. Filter results to objects whose names are lexicographically + // before lexicographic_end. If lexicographic_start is also set, the objects + // listed have names between lexicographic_start (inclusive) and + // lexicographic_end (exclusive). + LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"` +} + +func (x *ListObjectsRequest) Reset() { + *x = ListObjectsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListObjectsRequest) ProtoMessage() {} + +func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. 
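To make the prefix/delimiter behaviour above concrete, here is a small sketch of a directory-style listing request; the bucket, prefix, and page size are placeholder values.

// Sketch: list the immediate "children" of logs/2023/ as a directory-style
// view. Objects deeper in the hierarchy are collapsed into prefixes because
// of the "/" delimiter; lexicographic_start/lexicographic_end could narrow
// the name range further.
func listFolderRequest() *ListObjectsRequest {
	return &ListObjectsRequest{
		Parent:    "projects/_/buckets/example-bucket", // assumed v2 resource-name form
		Prefix:    "logs/2023/",
		Delimiter: "/",
		PageSize:  100,
	}
}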
+func (*ListObjectsRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} +} + +func (x *ListObjectsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListObjectsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListObjectsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListObjectsRequest) GetDelimiter() string { + if x != nil { + return x.Delimiter + } + return "" +} + +func (x *ListObjectsRequest) GetIncludeTrailingDelimiter() bool { + if x != nil { + return x.IncludeTrailingDelimiter + } + return false +} + +func (x *ListObjectsRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *ListObjectsRequest) GetVersions() bool { + if x != nil { + return x.Versions + } + return false +} + +func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +func (x *ListObjectsRequest) GetLexicographicStart() string { + if x != nil { + return x.LexicographicStart + } + return "" +} + +func (x *ListObjectsRequest) GetLexicographicEnd() string { + if x != nil { + return x.LexicographicEnd + } + return "" +} + +// Request object for `QueryWriteStatus`. +type QueryWriteStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the resume token for the object whose write status is + // being requested. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,2,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *QueryWriteStatusRequest) Reset() { + *x = QueryWriteStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryWriteStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryWriteStatusRequest) ProtoMessage() {} + +func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead. +func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} +} + +func (x *QueryWriteStatusRequest) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +func (x *QueryWriteStatusRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +// Response object for `QueryWriteStatus`. +type QueryWriteStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response will set one of the following. 
+ // + // Types that are assignable to WriteStatus: + // + // *QueryWriteStatusResponse_PersistedSize + // *QueryWriteStatusResponse_Resource + WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"` +} + +func (x *QueryWriteStatusResponse) Reset() { + *x = QueryWriteStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryWriteStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryWriteStatusResponse) ProtoMessage() {} + +func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead. +func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} +} + +func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus { + if m != nil { + return m.WriteStatus + } + return nil +} + +func (x *QueryWriteStatusResponse) GetPersistedSize() int64 { + if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_PersistedSize); ok { + return x.PersistedSize + } + return 0 +} + +func (x *QueryWriteStatusResponse) GetResource() *Object { + if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_Resource); ok { + return x.Resource + } + return nil +} + +type isQueryWriteStatusResponse_WriteStatus interface { + isQueryWriteStatusResponse_WriteStatus() +} + +type QueryWriteStatusResponse_PersistedSize struct { + // The total number of bytes that have been processed for the given object + // from all `WriteObject` calls. This is the correct value for the + // 'write_offset' field to use when resuming the `WriteObject` operation. + // Only set if the upload has not finalized. + PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` +} + +type QueryWriteStatusResponse_Resource struct { + // A resource containing the metadata for the uploaded object. Only set if + // the upload has finalized. + Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` +} + +func (*QueryWriteStatusResponse_PersistedSize) isQueryWriteStatusResponse_WriteStatus() {} + +func (*QueryWriteStatusResponse_Resource) isQueryWriteStatusResponse_WriteStatus() {} + +// Request message for RewriteObject. +// If the source object is encrypted using a Customer-Supplied Encryption Key +// the key information must be provided in the copy_source_encryption_algorithm, +// copy_source_encryption_key_bytes, and copy_source_encryption_key_sha256_bytes +// fields. If the destination object should be encrypted the keying information +// should be provided in the encryption_algorithm, encryption_key_bytes, and +// encryption_key_sha256_bytes fields of the +// common_object_request_params.customer_encryption field. +type RewriteObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Immutable. The name of the destination object. + // See the + // [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming). 
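The write_status oneof documented on QueryWriteStatusResponse above is how a client decides whether to resume or stop. A sketch of that branching follows, under the assumption that the response came from a QueryWriteStatus call for an in-progress upload.

// Sketch: interpret a QueryWriteStatusResponse. If the upload has finalized,
// the resource is returned; otherwise persisted_size is the offset to use as
// write_offset when the WriteObject stream is resumed.
func nextWriteOffset(resp *QueryWriteStatusResponse) (offset int64, done *Object) {
	switch ws := resp.GetWriteStatus().(type) {
	case *QueryWriteStatusResponse_Resource:
		return 0, ws.Resource // upload finished; nothing left to write
	case *QueryWriteStatusResponse_PersistedSize:
		return ws.PersistedSize, nil // resume writing from this offset
	default:
		return 0, nil
	}
}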
+ // Example: `test.txt` + // The `name` field by itself does not uniquely identify a Cloud Storage + // object. A Cloud Storage object is uniquely identified by the tuple of + // (bucket, object, generation). + DestinationName string `protobuf:"bytes,24,opt,name=destination_name,json=destinationName,proto3" json:"destination_name,omitempty"` + // Required. Immutable. The name of the bucket containing the destination + // object. + DestinationBucket string `protobuf:"bytes,25,opt,name=destination_bucket,json=destinationBucket,proto3" json:"destination_bucket,omitempty"` + // The name of the Cloud KMS key that will be used to encrypt the destination + // object. The Cloud KMS key must be located in same location as the object. + // If the parameter is not specified, the request uses the destination + // bucket's default encryption key, if any, or else the Google-managed + // encryption key. + DestinationKmsKey string `protobuf:"bytes,27,opt,name=destination_kms_key,json=destinationKmsKey,proto3" json:"destination_kms_key,omitempty"` + // Properties of the destination, post-rewrite object. + // The `name`, `bucket` and `kms_key` fields must not be populated (these + // values are specified in the `destination_name`, `destination_bucket`, and + // `destination_kms_key` fields). + // If `destination` is present it will be used to construct the destination + // object's metadata; otherwise the destination object's metadata will be + // copied from the source object. + Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` + // Required. Name of the bucket in which to find the source object. + SourceBucket string `protobuf:"bytes,2,opt,name=source_bucket,json=sourceBucket,proto3" json:"source_bucket,omitempty"` + // Required. Name of the source object. + SourceObject string `protobuf:"bytes,3,opt,name=source_object,json=sourceObject,proto3" json:"source_object,omitempty"` + // If present, selects a specific revision of the source object (as opposed to + // the latest version, the default). + SourceGeneration int64 `protobuf:"varint,4,opt,name=source_generation,json=sourceGeneration,proto3" json:"source_generation,omitempty"` + // Include this field (from the previous rewrite response) on each rewrite + // request after the first one, until the rewrite response 'done' flag is + // true. Calls that provide a rewriteToken can omit all other request fields, + // but if included those fields must match the values provided in the first + // rewrite request. + RewriteToken string `protobuf:"bytes,5,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"` + // Apply a predefined set of access controls to the destination object. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + DestinationPredefinedAcl string `protobuf:"bytes,28,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,7,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. 
If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the destination object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the destination object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,10,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // Makes the operation conditional on whether the source object's live + // generation matches the given value. + IfSourceGenerationMatch *int64 `protobuf:"varint,11,opt,name=if_source_generation_match,json=ifSourceGenerationMatch,proto3,oneof" json:"if_source_generation_match,omitempty"` + // Makes the operation conditional on whether the source object's live + // generation does not match the given value. + IfSourceGenerationNotMatch *int64 `protobuf:"varint,12,opt,name=if_source_generation_not_match,json=ifSourceGenerationNotMatch,proto3,oneof" json:"if_source_generation_not_match,omitempty"` + // Makes the operation conditional on whether the source object's current + // metageneration matches the given value. + IfSourceMetagenerationMatch *int64 `protobuf:"varint,13,opt,name=if_source_metageneration_match,json=ifSourceMetagenerationMatch,proto3,oneof" json:"if_source_metageneration_match,omitempty"` + // Makes the operation conditional on whether the source object's current + // metageneration does not match the given value. + IfSourceMetagenerationNotMatch *int64 `protobuf:"varint,14,opt,name=if_source_metageneration_not_match,json=ifSourceMetagenerationNotMatch,proto3,oneof" json:"if_source_metageneration_not_match,omitempty"` + // The maximum number of bytes that will be rewritten per rewrite request. + // Most callers + // shouldn't need to specify this parameter - it is primarily in place to + // support testing. If specified the value must be an integral multiple of + // 1 MiB (1048576). Also, this only applies to requests where the source and + // destination span locations and/or storage classes. Finally, this value must + // not change across rewrite calls else you'll get an error that the + // `rewriteToken` is invalid. + MaxBytesRewrittenPerCall int64 `protobuf:"varint,15,opt,name=max_bytes_rewritten_per_call,json=maxBytesRewrittenPerCall,proto3" json:"max_bytes_rewritten_per_call,omitempty"` + // The algorithm used to encrypt the source object, if any. Used if the source + // object was encrypted with a Customer-Supplied Encryption Key. + CopySourceEncryptionAlgorithm string `protobuf:"bytes,16,opt,name=copy_source_encryption_algorithm,json=copySourceEncryptionAlgorithm,proto3" json:"copy_source_encryption_algorithm,omitempty"` + // The raw bytes (not base64-encoded) AES-256 encryption key used to encrypt + // the source object, if it was encrypted with a Customer-Supplied Encryption + // Key. 
+ CopySourceEncryptionKeyBytes []byte `protobuf:"bytes,21,opt,name=copy_source_encryption_key_bytes,json=copySourceEncryptionKeyBytes,proto3" json:"copy_source_encryption_key_bytes,omitempty"` + // The raw bytes (not base64-encoded) SHA256 hash of the encryption key used + // to encrypt the source object, if it was encrypted with a Customer-Supplied + // Encryption Key. + CopySourceEncryptionKeySha256Bytes []byte `protobuf:"bytes,22,opt,name=copy_source_encryption_key_sha256_bytes,json=copySourceEncryptionKeySha256Bytes,proto3" json:"copy_source_encryption_key_sha256_bytes,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,19,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` + // The checksums of the complete object. This will be used to validate the + // destination object after rewriting. + ObjectChecksums *ObjectChecksums `protobuf:"bytes,29,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` +} + +func (x *RewriteObjectRequest) Reset() { + *x = RewriteObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RewriteObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RewriteObjectRequest) ProtoMessage() {} + +func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead. 
+func (*RewriteObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} +} + +func (x *RewriteObjectRequest) GetDestinationName() string { + if x != nil { + return x.DestinationName + } + return "" +} + +func (x *RewriteObjectRequest) GetDestinationBucket() string { + if x != nil { + return x.DestinationBucket + } + return "" +} + +func (x *RewriteObjectRequest) GetDestinationKmsKey() string { + if x != nil { + return x.DestinationKmsKey + } + return "" +} + +func (x *RewriteObjectRequest) GetDestination() *Object { + if x != nil { + return x.Destination + } + return nil +} + +func (x *RewriteObjectRequest) GetSourceBucket() string { + if x != nil { + return x.SourceBucket + } + return "" +} + +func (x *RewriteObjectRequest) GetSourceObject() string { + if x != nil { + return x.SourceObject + } + return "" +} + +func (x *RewriteObjectRequest) GetSourceGeneration() int64 { + if x != nil { + return x.SourceGeneration + } + return 0 +} + +func (x *RewriteObjectRequest) GetRewriteToken() string { + if x != nil { + return x.RewriteToken + } + return "" +} + +func (x *RewriteObjectRequest) GetDestinationPredefinedAcl() string { + if x != nil { + return x.DestinationPredefinedAcl + } + return "" +} + +func (x *RewriteObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfSourceGenerationMatch() int64 { + if x != nil && x.IfSourceGenerationMatch != nil { + return *x.IfSourceGenerationMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfSourceGenerationNotMatch() int64 { + if x != nil && x.IfSourceGenerationNotMatch != nil { + return *x.IfSourceGenerationNotMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfSourceMetagenerationMatch() int64 { + if x != nil && x.IfSourceMetagenerationMatch != nil { + return *x.IfSourceMetagenerationMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfSourceMetagenerationNotMatch() int64 { + if x != nil && x.IfSourceMetagenerationNotMatch != nil { + return *x.IfSourceMetagenerationNotMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetMaxBytesRewrittenPerCall() int64 { + if x != nil { + return x.MaxBytesRewrittenPerCall + } + return 0 +} + +func (x *RewriteObjectRequest) GetCopySourceEncryptionAlgorithm() string { + if x != nil { + return x.CopySourceEncryptionAlgorithm + } + return "" +} + +func (x *RewriteObjectRequest) GetCopySourceEncryptionKeyBytes() []byte { + if x != nil { + return x.CopySourceEncryptionKeyBytes + } + return nil +} + +func (x *RewriteObjectRequest) GetCopySourceEncryptionKeySha256Bytes() []byte { + if x != nil { + return x.CopySourceEncryptionKeySha256Bytes + } + return nil +} + +func (x *RewriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +func (x *RewriteObjectRequest) GetObjectChecksums() *ObjectChecksums { 
+ if x != nil { + return x.ObjectChecksums + } + return nil +} + +// A rewrite response. +type RewriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The total bytes written so far, which can be used to provide a waiting user + // with a progress indicator. This property is always present in the response. + TotalBytesRewritten int64 `protobuf:"varint,1,opt,name=total_bytes_rewritten,json=totalBytesRewritten,proto3" json:"total_bytes_rewritten,omitempty"` + // The total size of the object being copied in bytes. This property is always + // present in the response. + ObjectSize int64 `protobuf:"varint,2,opt,name=object_size,json=objectSize,proto3" json:"object_size,omitempty"` + // `true` if the copy is finished; otherwise, `false` if + // the copy is in progress. This property is always present in the response. + Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"` + // A token to use in subsequent requests to continue copying data. This token + // is present in the response only when there is more data to copy. + RewriteToken string `protobuf:"bytes,4,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"` + // A resource containing the metadata for the copied-to object. This property + // is present in the response only when copying completes. + Resource *Object `protobuf:"bytes,5,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *RewriteResponse) Reset() { + *x = RewriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RewriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RewriteResponse) ProtoMessage() {} + +func (x *RewriteResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead. +func (*RewriteResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} +} + +func (x *RewriteResponse) GetTotalBytesRewritten() int64 { + if x != nil { + return x.TotalBytesRewritten + } + return 0 +} + +func (x *RewriteResponse) GetObjectSize() int64 { + if x != nil { + return x.ObjectSize + } + return 0 +} + +func (x *RewriteResponse) GetDone() bool { + if x != nil { + return x.Done + } + return false +} + +func (x *RewriteResponse) GetRewriteToken() string { + if x != nil { + return x.RewriteToken + } + return "" +} + +func (x *RewriteResponse) GetResource() *Object { + if x != nil { + return x.Resource + } + return nil +} + +// Request message StartResumableWrite. +type StartResumableWriteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The destination bucket, object, and metadata, as well as any + // preconditions. + WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,1,opt,name=write_object_spec,json=writeObjectSpec,proto3" json:"write_object_spec,omitempty"` + // A set of parameters common to Storage API requests concerning an object. 
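The rewrite_token round-trip described in the RewriteObjectRequest and RewriteResponse comments above is the part most callers end up scripting. The sketch below shows only the loop shape; the actual RPC is hidden behind a caller-supplied function, so the transport details are assumptions.

// Sketch: drive RewriteObject to completion. Each response's rewrite_token is
// copied into the next request until done is true; per the field docs, the
// other request fields must match the first call if they are repeated.
func rewriteUntilDone(req *RewriteObjectRequest,
	call func(*RewriteObjectRequest) (*RewriteResponse, error)) (*Object, error) {
	for {
		resp, err := call(req)
		if err != nil {
			return nil, err
		}
		if resp.GetDone() {
			return resp.GetResource(), nil
		}
		req.RewriteToken = resp.GetRewriteToken() // carry the token forward
	}
}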
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,3,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` + // The checksums of the complete object. This will be used to validate the + // uploaded object. For each upload, object_checksums can be provided with + // either StartResumableWriteRequest or the WriteObjectRequest with + // finish_write set to `true`. + ObjectChecksums *ObjectChecksums `protobuf:"bytes,5,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` +} + +func (x *StartResumableWriteRequest) Reset() { + *x = StartResumableWriteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartResumableWriteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartResumableWriteRequest) ProtoMessage() {} + +func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead. +func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} +} + +func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec { + if x != nil { + return x.WriteObjectSpec + } + return nil +} + +func (x *StartResumableWriteRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +func (x *StartResumableWriteRequest) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +// Response object for `StartResumableWrite`. +type StartResumableWriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The upload_id of the newly started resumable write operation. This + // value should be copied into the `WriteObjectRequest.upload_id` field. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` +} + +func (x *StartResumableWriteResponse) Reset() { + *x = StartResumableWriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartResumableWriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartResumableWriteResponse) ProtoMessage() {} + +func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead. 
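Tying StartResumableWrite back to the WriteObjectRequest messages defined earlier, here is a hedged sketch of the first two messages of a resumable upload. The RPC plumbing is omitted and the helper names are invented for illustration.

// Sketch: begin a resumable upload, then build the first follow-up write.
// The upload_id returned by StartResumableWrite goes into the first_message
// oneof of each WriteObjectRequest instead of a write_object_spec.
func startResumable(spec *WriteObjectSpec) *StartResumableWriteRequest {
	return &StartResumableWriteRequest{WriteObjectSpec: spec}
}

func firstResumableChunk(resp *StartResumableWriteResponse, chunk []byte) *WriteObjectRequest {
	return &WriteObjectRequest{
		FirstMessage: &WriteObjectRequest_UploadId{UploadId: resp.GetUploadId()},
		WriteOffset:  0, // persisted_size is 0 for the first write
		Data: &WriteObjectRequest_ChecksummedData{
			ChecksummedData: &ChecksummedData{Content: chunk},
		},
	}
}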
+func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} +} + +func (x *StartResumableWriteResponse) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +// Request message for UpdateObject. +type UpdateObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The object to update. + // The object's bucket and name fields are used to identify the object to + // update. If present, the object's generation field selects a specific + // revision of this object whose metadata should be updated. Otherwise, + // assumes the live version of the object. + Object *Object `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,2,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,4,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // Apply a predefined set of access controls to this object. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + PredefinedAcl string `protobuf:"bytes,10,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` + // Required. List of fields to be updated. + // + // To specify ALL fields, equivalent to the JSON API's "update" function, + // specify a single field with the value `*`. Note: not recommended. If a new + // field is introduced at a later time, an older client updating with the `*` + // may accidentally reset the new field's value. + // + // Not specifying any fields is an error. + // Not specifying a field while setting that field to a non-default value is + // an error. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // A set of parameters common to Storage API requests concerning an object. 
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *UpdateObjectRequest) Reset() { + *x = UpdateObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateObjectRequest) ProtoMessage() {} + +func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead. +func (*UpdateObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} +} + +func (x *UpdateObjectRequest) GetObject() *Object { + if x != nil { + return x.Object + } + return nil +} + +func (x *UpdateObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *UpdateObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *UpdateObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *UpdateObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *UpdateObjectRequest) GetPredefinedAcl() string { + if x != nil { + return x.PredefinedAcl + } + return "" +} + +func (x *UpdateObjectRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +func (x *UpdateObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +// Request message for GetServiceAccount. +type GetServiceAccountRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Project ID, in the format of "projects/". + // can be the project ID or project number. 
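The update_mask caveats on UpdateObjectRequest above (list fields explicitly, avoid `*`) are worth a concrete sketch. The field paths and values are assumptions for illustration, and fieldmaskpb is the same package already used for the FieldMask fields in this file.

// Sketch: update only cache_control and content_type on the live version of
// an object, leaving every other metadata field untouched. Listing the paths
// explicitly avoids the "*" pitfall described in the update_mask comment.
func cacheControlUpdate() *UpdateObjectRequest {
	return &UpdateObjectRequest{
		Object: &Object{
			Bucket:       "projects/_/buckets/example-bucket", // assumed v2 resource-name form
			Name:         "example-object.txt",
			CacheControl: "public, max-age=3600",
			ContentType:  "text/plain",
		},
		UpdateMask: &fieldmaskpb.FieldMask{
			Paths: []string{"cache_control", "content_type"},
		},
	}
}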
+ Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` +} + +func (x *GetServiceAccountRequest) Reset() { + *x = GetServiceAccountRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetServiceAccountRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetServiceAccountRequest) ProtoMessage() {} + +func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead. +func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} +} + +func (x *GetServiceAccountRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +// Request message for CreateHmacKey. +type CreateHmacKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The project that the HMAC-owning service account lives in, in the + // format of "projects/". can be the + // project ID or project number. + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` + // Required. The service account to create the HMAC for. + ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` +} + +func (x *CreateHmacKeyRequest) Reset() { + *x = CreateHmacKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateHmacKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateHmacKeyRequest) ProtoMessage() {} + +func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead. +func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} +} + +func (x *CreateHmacKeyRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *CreateHmacKeyRequest) GetServiceAccountEmail() string { + if x != nil { + return x.ServiceAccountEmail + } + return "" +} + +// Create hmac response. The only time the secret for an HMAC will be returned. +type CreateHmacKeyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Key metadata. + Metadata *HmacKeyMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // HMAC key secret material. + // In raw bytes format (not base64-encoded). 
+ SecretKeyBytes []byte `protobuf:"bytes,3,opt,name=secret_key_bytes,json=secretKeyBytes,proto3" json:"secret_key_bytes,omitempty"` +} + +func (x *CreateHmacKeyResponse) Reset() { + *x = CreateHmacKeyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateHmacKeyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateHmacKeyResponse) ProtoMessage() {} + +func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead. +func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} +} + +func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *CreateHmacKeyResponse) GetSecretKeyBytes() []byte { + if x != nil { + return x.SecretKeyBytes + } + return nil +} + +// Request object to delete a given HMAC key. +type DeleteHmacKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The identifying key for the HMAC to delete. + AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` + // Required. The project that owns the HMAC key, in the format of + // "projects/". + // can be the project ID or project number. + Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` +} + +func (x *DeleteHmacKeyRequest) Reset() { + *x = DeleteHmacKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteHmacKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteHmacKeyRequest) ProtoMessage() {} + +func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead. +func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} +} + +func (x *DeleteHmacKeyRequest) GetAccessId() string { + if x != nil { + return x.AccessId + } + return "" +} + +func (x *DeleteHmacKeyRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +// Request object to get metadata on a given HMAC key. +type GetHmacKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The identifying key for the HMAC to delete. + AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` + // Required. The project the HMAC key lies in, in the format of + // "projects/". 
+ // can be the project ID or project number. + Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` +} + +func (x *GetHmacKeyRequest) Reset() { + *x = GetHmacKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetHmacKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetHmacKeyRequest) ProtoMessage() {} + +func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead. +func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} +} + +func (x *GetHmacKeyRequest) GetAccessId() string { + if x != nil { + return x.AccessId + } + return "" +} + +func (x *GetHmacKeyRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +// Request to fetch a list of HMAC keys under a given project. +type ListHmacKeysRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The project to list HMAC keys for, in the format of + // "projects/". + // can be the project ID or project number. + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` + // The maximum number of keys to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A previously returned token from ListHmacKeysResponse to get the next page. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // If set, filters to only return HMAC keys for specified service account. + ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` + // If set, return deleted keys that have not yet been wiped out. + ShowDeletedKeys bool `protobuf:"varint,5,opt,name=show_deleted_keys,json=showDeletedKeys,proto3" json:"show_deleted_keys,omitempty"` +} + +func (x *ListHmacKeysRequest) Reset() { + *x = ListHmacKeysRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListHmacKeysRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListHmacKeysRequest) ProtoMessage() {} + +func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead. 
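The GetServiceAccount and HMAC key messages above are wrapped by the Client.ServiceAccount, CreateHMACKey, and ListHMACKeys helpers in the high-level Go client; as the CreateHmacKeyResponse comment notes, the secret is only returned at creation time. A rough sketch, with a hypothetical project ID and a client assumed to come from storage.NewClient:

package example

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

// createAndListHMACKeys mints an HMAC key for the project's Cloud Storage
// service account and then lists the keys owned by that account. The project
// ID is hypothetical.
func createAndListHMACKeys(ctx context.Context, client *storage.Client) error {
	// GetServiceAccount: look up the service account email to bind the key to.
	email, err := client.ServiceAccount(ctx, "example-project")
	if err != nil {
		return err
	}

	// CreateHmacKey: the secret is only available in this response.
	key, err := client.CreateHMACKey(ctx, "example-project", email)
	if err != nil {
		return err
	}
	log.Printf("access id %s (store the secret now: %s)", key.AccessID, key.Secret)

	// ListHmacKeys: filter by service account email.
	it := client.ListHMACKeys(ctx, "example-project", storage.ForHMACKeyServiceAccountEmail(email))
	for {
		k, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		log.Printf("%s: %s", k.AccessID, k.State)
	}
	return nil
}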
+func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} +} + +func (x *ListHmacKeysRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *ListHmacKeysRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListHmacKeysRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListHmacKeysRequest) GetServiceAccountEmail() string { + if x != nil { + return x.ServiceAccountEmail + } + return "" +} + +func (x *ListHmacKeysRequest) GetShowDeletedKeys() bool { + if x != nil { + return x.ShowDeletedKeys + } + return false +} + +// Hmac key list response with next page information. +type ListHmacKeysResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of items. + HmacKeys []*HmacKeyMetadata `protobuf:"bytes,1,rep,name=hmac_keys,json=hmacKeys,proto3" json:"hmac_keys,omitempty"` + // The continuation token, used to page through large result sets. Provide + // this value in a subsequent request to return the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListHmacKeysResponse) Reset() { + *x = ListHmacKeysResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListHmacKeysResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListHmacKeysResponse) ProtoMessage() {} + +func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead. +func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} +} + +func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata { + if x != nil { + return x.HmacKeys + } + return nil +} + +func (x *ListHmacKeysResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request object to update an HMAC key state. +// HmacKeyMetadata.state is required and the only writable field in +// UpdateHmacKey operation. Specifying fields other than state will result in an +// error. +type UpdateHmacKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The HMAC key to update. + // If present, the hmac_key's `id` field will be used to identify the key. + // Otherwise, the hmac_key's access_id and project fields will be used to + // identify the key. + HmacKey *HmacKeyMetadata `protobuf:"bytes,1,opt,name=hmac_key,json=hmacKey,proto3" json:"hmac_key,omitempty"` + // Update mask for hmac_key. + // Not specifying any fields will mean only the `state` field is updated to + // the value specified in `hmac_key`. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateHmacKeyRequest) Reset() { + *x = UpdateHmacKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateHmacKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateHmacKeyRequest) ProtoMessage() {} + +func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead. +func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} +} + +func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { + if x != nil { + return x.HmacKey + } + return nil +} + +func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Parameters that can be passed to any object request. +type CommonObjectRequestParams struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Encryption algorithm used with the Customer-Supplied Encryption Keys + // feature. + EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` + // Encryption key used with the Customer-Supplied Encryption Keys feature. + // In raw bytes format (not base64-encoded). + EncryptionKeyBytes []byte `protobuf:"bytes,4,opt,name=encryption_key_bytes,json=encryptionKeyBytes,proto3" json:"encryption_key_bytes,omitempty"` + // SHA256 hash of encryption key used with the Customer-Supplied Encryption + // Keys feature. + EncryptionKeySha256Bytes []byte `protobuf:"bytes,5,opt,name=encryption_key_sha256_bytes,json=encryptionKeySha256Bytes,proto3" json:"encryption_key_sha256_bytes,omitempty"` +} + +func (x *CommonObjectRequestParams) Reset() { + *x = CommonObjectRequestParams{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonObjectRequestParams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonObjectRequestParams) ProtoMessage() {} + +func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. 
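CommonObjectRequestParams above carries the Customer-Supplied Encryption Key material; with the high-level client you normally never populate it directly, because ObjectHandle.Key fills in the algorithm and SHA-256 from a raw 32-byte AES-256 key. A sketch, assuming the key bytes come from your own key management and hypothetical bucket/object names:

package example

import (
	"context"
	"io"
	"os"

	"cloud.google.com/go/storage"
)

// readWithCSEK reads an object that was written with a customer-supplied
// AES-256 key. Bucket and object names are hypothetical; csekKey must be the
// same 32 raw bytes used at write time.
func readWithCSEK(ctx context.Context, client *storage.Client, csekKey []byte) error {
	r, err := client.Bucket("example-bucket").
		Object("example-object").
		Key(csekKey). // supplies encryption algorithm, key bytes, and key SHA-256
		NewReader(ctx)
	if err != nil {
		return err
	}
	defer r.Close()
	_, err = io.Copy(os.Stdout, r)
	return err
}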
+func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} +} + +func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { + if x != nil { + return x.EncryptionAlgorithm + } + return "" +} + +func (x *CommonObjectRequestParams) GetEncryptionKeyBytes() []byte { + if x != nil { + return x.EncryptionKeyBytes + } + return nil +} + +func (x *CommonObjectRequestParams) GetEncryptionKeySha256Bytes() []byte { + if x != nil { + return x.EncryptionKeySha256Bytes + } + return nil +} + +// Shared constants. +type ServiceConstants struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ServiceConstants) Reset() { + *x = ServiceConstants{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceConstants) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceConstants) ProtoMessage() {} + +func (x *ServiceConstants) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. +func (*ServiceConstants) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} +} + +// A bucket. +type Bucket struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Immutable. The name of the bucket. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The user-chosen part of the bucket name. The `{bucket}` + // portion of the `name` field. For globally unique buckets, this is equal to + // the "bucket name" of other Cloud Storage APIs. Example: "pub". + BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` + // The etag of the bucket. + // If included in the metadata of an UpdateBucketRequest, the operation will + // only be performed if the etag matches that of the bucket. + Etag string `protobuf:"bytes,29,opt,name=etag,proto3" json:"etag,omitempty"` + // Immutable. The project which owns this bucket, in the format of + // "projects/". + // can be the project ID or project number. + Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` + // Output only. The metadata generation of this bucket. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` + // Immutable. The location of the bucket. Object data for objects in the + // bucket resides in physical storage within this region. Defaults to `US`. + // See the + // [https://developers.google.com/storage/docs/concepts-techniques#specifyinglocations"][developer's + // guide] for the authoritative list. Attempting to update this field after + // the bucket is created will result in an error. + Location string `protobuf:"bytes,5,opt,name=location,proto3" json:"location,omitempty"` + // Output only. 
The location type of the bucket (region, dual-region, + // multi-region, etc). + LocationType string `protobuf:"bytes,6,opt,name=location_type,json=locationType,proto3" json:"location_type,omitempty"` + // The bucket's default storage class, used whenever no storageClass is + // specified for a newly-created object. This defines how objects in the + // bucket are stored and determines the SLA and the cost of storage. + // If this value is not specified when the bucket is created, it will default + // to `STANDARD`. For more information, see + // https://developers.google.com/storage/docs/storage-classes. + StorageClass string `protobuf:"bytes,7,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + // The recovery point objective for cross-region replication of the bucket. + // Applicable only for dual- and multi-region buckets. "DEFAULT" uses default + // replication. "ASYNC_TURBO" enables turbo replication, valid for dual-region + // buckets only. If rpo is not specified when the bucket is created, it + // defaults to "DEFAULT". For more information, see + // https://cloud.google.com/storage/docs/turbo-replication. + Rpo string `protobuf:"bytes,27,opt,name=rpo,proto3" json:"rpo,omitempty"` + // Access controls on the bucket. + // If iam_config.uniform_bucket_level_access is enabled on this bucket, + // requests to set, read, or modify acl is an error. + Acl []*BucketAccessControl `protobuf:"bytes,8,rep,name=acl,proto3" json:"acl,omitempty"` + // Default access controls to apply to new objects when no ACL is provided. + // If iam_config.uniform_bucket_level_access is enabled on this bucket, + // requests to set, read, or modify acl is an error. + DefaultObjectAcl []*ObjectAccessControl `protobuf:"bytes,9,rep,name=default_object_acl,json=defaultObjectAcl,proto3" json:"default_object_acl,omitempty"` + // The bucket's lifecycle config. See + // [https://developers.google.com/storage/docs/lifecycle]Lifecycle Management] + // for more information. + Lifecycle *Bucket_Lifecycle `protobuf:"bytes,10,opt,name=lifecycle,proto3" json:"lifecycle,omitempty"` + // Output only. The creation time of the bucket. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] + // (CORS) config. + Cors []*Bucket_Cors `protobuf:"bytes,12,rep,name=cors,proto3" json:"cors,omitempty"` + // Output only. The modification time of the bucket. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + // The default value for event-based hold on newly created objects in this + // bucket. Event-based hold is a way to retain objects indefinitely until an + // event occurs, signified by the + // hold's release. After being released, such objects will be subject to + // bucket-level retention (if any). One sample use case of this flag is for + // banks to hold loan documents for at least 3 years after loan is paid in + // full. Here, bucket-level retention is 3 years and the event is loan being + // paid in full. 
In this example, these objects will be held intact for any + // number of years until the event has occurred (event-based hold on the + // object is released) and then 3 more years after that. That means retention + // duration of the objects begins from the moment event-based hold + // transitioned from true to false. Objects under event-based hold cannot be + // deleted, overwritten or archived until the hold is removed. + DefaultEventBasedHold bool `protobuf:"varint,14,opt,name=default_event_based_hold,json=defaultEventBasedHold,proto3" json:"default_event_based_hold,omitempty"` + // User-provided labels, in key/value pairs. + Labels map[string]string `protobuf:"bytes,15,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The bucket's website config, controlling how the service behaves + // when accessing bucket contents as a web site. See the + // [https://cloud.google.com/storage/docs/static-website][Static Website + // Examples] for more information. + Website *Bucket_Website `protobuf:"bytes,16,opt,name=website,proto3" json:"website,omitempty"` + // The bucket's versioning config. + Versioning *Bucket_Versioning `protobuf:"bytes,17,opt,name=versioning,proto3" json:"versioning,omitempty"` + // The bucket's logging config, which defines the destination bucket + // and name prefix (if any) for the current bucket's logs. + Logging *Bucket_Logging `protobuf:"bytes,18,opt,name=logging,proto3" json:"logging,omitempty"` + // Output only. The owner of the bucket. This is always the project team's + // owner group. + Owner *Owner `protobuf:"bytes,19,opt,name=owner,proto3" json:"owner,omitempty"` + // Encryption config for a bucket. + Encryption *Bucket_Encryption `protobuf:"bytes,20,opt,name=encryption,proto3" json:"encryption,omitempty"` + // The bucket's billing config. + Billing *Bucket_Billing `protobuf:"bytes,21,opt,name=billing,proto3" json:"billing,omitempty"` + // The bucket's retention policy. The retention policy enforces a minimum + // retention time for all objects contained in the bucket, based on their + // creation time. Any attempt to overwrite or delete objects younger than the + // retention period will result in a PERMISSION_DENIED error. An unlocked + // retention policy can be modified or removed from the bucket via a + // storage.buckets.update operation. A locked retention policy cannot be + // removed or shortened in duration for the lifetime of the bucket. + // Attempting to remove or decrease period of a locked retention policy will + // result in a PERMISSION_DENIED error. + RetentionPolicy *Bucket_RetentionPolicy `protobuf:"bytes,22,opt,name=retention_policy,json=retentionPolicy,proto3" json:"retention_policy,omitempty"` + // The bucket's IAM config. + IamConfig *Bucket_IamConfig `protobuf:"bytes,23,opt,name=iam_config,json=iamConfig,proto3" json:"iam_config,omitempty"` + // Reserved for future use. + SatisfiesPzs bool `protobuf:"varint,25,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"` + // Configuration that, if present, specifies the data placement for a + // [https://cloud.google.com/storage/docs/use-dual-regions][Dual Region]. + CustomPlacementConfig *Bucket_CustomPlacementConfig `protobuf:"bytes,26,opt,name=custom_placement_config,json=customPlacementConfig,proto3" json:"custom_placement_config,omitempty"` + // The bucket's Autoclass configuration. 
If there is no configuration, the + // Autoclass feature will be disabled and have no effect on the bucket. + Autoclass *Bucket_Autoclass `protobuf:"bytes,28,opt,name=autoclass,proto3" json:"autoclass,omitempty"` +} + +func (x *Bucket) Reset() { + *x = Bucket{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket) ProtoMessage() {} + +func (x *Bucket) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. +func (*Bucket) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} +} + +func (x *Bucket) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Bucket) GetBucketId() string { + if x != nil { + return x.BucketId + } + return "" +} + +func (x *Bucket) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *Bucket) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *Bucket) GetMetageneration() int64 { + if x != nil { + return x.Metageneration + } + return 0 +} + +func (x *Bucket) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Bucket) GetLocationType() string { + if x != nil { + return x.LocationType + } + return "" +} + +func (x *Bucket) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *Bucket) GetRpo() string { + if x != nil { + return x.Rpo + } + return "" +} + +func (x *Bucket) GetAcl() []*BucketAccessControl { + if x != nil { + return x.Acl + } + return nil +} + +func (x *Bucket) GetDefaultObjectAcl() []*ObjectAccessControl { + if x != nil { + return x.DefaultObjectAcl + } + return nil +} + +func (x *Bucket) GetLifecycle() *Bucket_Lifecycle { + if x != nil { + return x.Lifecycle + } + return nil +} + +func (x *Bucket) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *Bucket) GetCors() []*Bucket_Cors { + if x != nil { + return x.Cors + } + return nil +} + +func (x *Bucket) GetUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.UpdateTime + } + return nil +} + +func (x *Bucket) GetDefaultEventBasedHold() bool { + if x != nil { + return x.DefaultEventBasedHold + } + return false +} + +func (x *Bucket) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Bucket) GetWebsite() *Bucket_Website { + if x != nil { + return x.Website + } + return nil +} + +func (x *Bucket) GetVersioning() *Bucket_Versioning { + if x != nil { + return x.Versioning + } + return nil +} + +func (x *Bucket) GetLogging() *Bucket_Logging { + if x != nil { + return x.Logging + } + return nil +} + +func (x *Bucket) GetOwner() *Owner { + if x != nil { + return x.Owner + } + return nil +} + +func (x *Bucket) GetEncryption() *Bucket_Encryption { + if x != nil { + return x.Encryption + } + return nil +} + +func (x *Bucket) GetBilling() *Bucket_Billing { + if x != nil { + return x.Billing + } + return nil +} + +func (x *Bucket) GetRetentionPolicy() 
*Bucket_RetentionPolicy { + if x != nil { + return x.RetentionPolicy + } + return nil +} + +func (x *Bucket) GetIamConfig() *Bucket_IamConfig { + if x != nil { + return x.IamConfig + } + return nil +} + +func (x *Bucket) GetSatisfiesPzs() bool { + if x != nil { + return x.SatisfiesPzs + } + return false +} + +func (x *Bucket) GetCustomPlacementConfig() *Bucket_CustomPlacementConfig { + if x != nil { + return x.CustomPlacementConfig + } + return nil +} + +func (x *Bucket) GetAutoclass() *Bucket_Autoclass { + if x != nil { + return x.Autoclass + } + return nil +} + +// An access-control entry. +type BucketAccessControl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The access permission for the entity. + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + // The ID of the access-control entry. + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + // The entity holding the permission, in one of the following forms: + // * `user-{userid}` + // * `user-{email}` + // * `group-{groupid}` + // * `group-{email}` + // * `domain-{domain}` + // * `project-{team}-{projectnumber}` + // * `project-{team}-{projectid}` + // * `allUsers` + // * `allAuthenticatedUsers` + // Examples: + // * The user `liz@example.com` would be `user-liz@example.com`. + // * The group `example@googlegroups.com` would be + // `group-example@googlegroups.com` + // * All members of the Google Apps for Business domain `example.com` would be + // `domain-example.com` + // For project entities, `project-{team}-{projectnumber}` format will be + // returned on response. + Entity string `protobuf:"bytes,3,opt,name=entity,proto3" json:"entity,omitempty"` + // Output only. The alternative entity format, if exists. For project + // entities, `project-{team}-{projectid}` format will be returned on response. + EntityAlt string `protobuf:"bytes,9,opt,name=entity_alt,json=entityAlt,proto3" json:"entity_alt,omitempty"` + // The ID for the entity, if any. + EntityId string `protobuf:"bytes,4,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` + // The etag of the BucketAccessControl. + // If included in the metadata of an update or delete request message, the + // operation operation will only be performed if the etag matches that of the + // bucket's BucketAccessControl. + Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` + // The email address associated with the entity, if any. + Email string `protobuf:"bytes,5,opt,name=email,proto3" json:"email,omitempty"` + // The domain associated with the entity, if any. + Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` + // The project team associated with the entity, if any. 
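The Bucket message above surfaces in the high-level client as storage.BucketAttrs. A short sketch of creating a bucket with a location, storage class, versioning, and a retention policy; the project and bucket names are hypothetical:

package example

import (
	"context"
	"time"

	"cloud.google.com/go/storage"
)

// createBucket creates a bucket whose attrs map onto the Bucket fields above
// (location, storage_class, versioning, retention_policy). Names are hypothetical.
func createBucket(ctx context.Context, client *storage.Client) error {
	attrs := &storage.BucketAttrs{
		Location:          "US",
		StorageClass:      "STANDARD",
		VersioningEnabled: true,
		RetentionPolicy: &storage.RetentionPolicy{
			RetentionPeriod: 24 * time.Hour, // objects younger than this cannot be deleted or overwritten
		},
	}
	return client.Bucket("example-bucket").Create(ctx, "example-project", attrs)
}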
+ ProjectTeam *ProjectTeam `protobuf:"bytes,7,opt,name=project_team,json=projectTeam,proto3" json:"project_team,omitempty"` +} + +func (x *BucketAccessControl) Reset() { + *x = BucketAccessControl{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BucketAccessControl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BucketAccessControl) ProtoMessage() {} + +func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead. +func (*BucketAccessControl) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} +} + +func (x *BucketAccessControl) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *BucketAccessControl) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *BucketAccessControl) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *BucketAccessControl) GetEntityAlt() string { + if x != nil { + return x.EntityAlt + } + return "" +} + +func (x *BucketAccessControl) GetEntityId() string { + if x != nil { + return x.EntityId + } + return "" +} + +func (x *BucketAccessControl) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *BucketAccessControl) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *BucketAccessControl) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *BucketAccessControl) GetProjectTeam() *ProjectTeam { + if x != nil { + return x.ProjectTeam + } + return nil +} + +// Message used to convey content being read or written, along with an optional +// checksum. +type ChecksummedData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The data. + Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // If set, the CRC32C digest of the content field. + Crc32C *uint32 `protobuf:"fixed32,2,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` +} + +func (x *ChecksummedData) Reset() { + *x = ChecksummedData{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ChecksummedData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChecksummedData) ProtoMessage() {} + +func (x *ChecksummedData) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead. 
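BucketAccessControl entries like the ones described above are exposed through the client's ACLHandle. A minimal sketch listing a bucket's ACL (bucket name hypothetical); note that, as the Bucket comment warns, this fails when uniform bucket-level access is enabled:

package example

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

// listBucketACL prints each access-control entry (entity and role) on a bucket.
// The bucket name is hypothetical.
func listBucketACL(ctx context.Context, client *storage.Client) error {
	rules, err := client.Bucket("example-bucket").ACL().List(ctx)
	if err != nil {
		return err
	}
	for _, r := range rules {
		log.Printf("%s -> %s", r.Entity, r.Role)
	}
	return nil
}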
+func (*ChecksummedData) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} +} + +func (x *ChecksummedData) GetContent() []byte { + if x != nil { + return x.Content + } + return nil +} + +func (x *ChecksummedData) GetCrc32C() uint32 { + if x != nil && x.Crc32C != nil { + return *x.Crc32C + } + return 0 +} + +// Message used for storing full (not subrange) object checksums. +type ObjectChecksums struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // CRC32C digest of the object data. Computed by the Cloud Storage service for + // all written objects. + // If set in an WriteObjectRequest, service will validate that the stored + // object matches this checksum. + Crc32C *uint32 `protobuf:"fixed32,1,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` + // 128 bit MD5 hash of the object data. + // For more information about using the MD5 hash, see + // [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes and + // ETags: Best Practices]. + // Not all objects will provide an MD5 hash. For example, composite objects + // provide only crc32c hashes. + // This value is equivalent to running `cat object.txt | openssl md5 -binary` + Md5Hash []byte `protobuf:"bytes,2,opt,name=md5_hash,json=md5Hash,proto3" json:"md5_hash,omitempty"` +} + +func (x *ObjectChecksums) Reset() { + *x = ObjectChecksums{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectChecksums) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectChecksums) ProtoMessage() {} + +func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. +func (*ObjectChecksums) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} +} + +func (x *ObjectChecksums) GetCrc32C() uint32 { + if x != nil && x.Crc32C != nil { + return *x.Crc32C + } + return 0 +} + +func (x *ObjectChecksums) GetMd5Hash() []byte { + if x != nil { + return x.Md5Hash + } + return nil +} + +// Hmac Key Metadata, which includes all information other than the secret. +type HmacKeyMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Immutable. Resource name ID of the key in the format + // /. + // can be the project ID or project number. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Immutable. Globally unique id for keys. + AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` + // Immutable. Identifies the project that owns the service account of the + // specified HMAC key, in the format "projects/". + // can be the project ID or project number. + Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` + // Output only. Email of the service account the key authenticates as. 
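ChecksummedData and ObjectChecksums above carry CRC32C (Castagnoli) and MD5 digests. With the high-level client you can precompute the CRC32C locally and have the service validate it on write; a sketch with hypothetical names:

package example

import (
	"context"
	"hash/crc32"

	"cloud.google.com/go/storage"
)

// writeWithCRC32C uploads data and has the service verify it against a
// client-side CRC32C (Castagnoli polynomial), mirroring ObjectChecksums.crc32c.
// Bucket and object names are hypothetical.
func writeWithCRC32C(ctx context.Context, client *storage.Client, data []byte) error {
	w := client.Bucket("example-bucket").Object("example-object").NewWriter(ctx)
	w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
	w.SendCRC32C = true // send the checksum so the service validates the upload
	if _, err := w.Write(data); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}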
+ ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` + // State of the key. One of ACTIVE, INACTIVE, or DELETED. + // Writable, can be updated by UpdateHmacKey operation. + State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` + // Output only. The creation time of the HMAC key. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. The last modification time of the HMAC key metadata. + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + // The etag of the HMAC key. + Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` +} + +func (x *HmacKeyMetadata) Reset() { + *x = HmacKeyMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HmacKeyMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HmacKeyMetadata) ProtoMessage() {} + +func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead. +func (*HmacKeyMetadata) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} +} + +func (x *HmacKeyMetadata) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *HmacKeyMetadata) GetAccessId() string { + if x != nil { + return x.AccessId + } + return "" +} + +func (x *HmacKeyMetadata) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *HmacKeyMetadata) GetServiceAccountEmail() string { + if x != nil { + return x.ServiceAccountEmail + } + return "" +} + +func (x *HmacKeyMetadata) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.UpdateTime + } + return nil +} + +func (x *HmacKeyMetadata) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +// A directive to publish Pub/Sub notifications upon changes to a bucket. +type Notification struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of this notification. + // Format: + // `projects/{project}/buckets/{bucket}/notificationConfigs/{notification}` + // The `{project}` portion may be `_` for globally unique buckets. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The Pub/Sub topic to which this subscription publishes. Formatted + // as: + // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + // The etag of the Notification. 
+ // If included in the metadata of GetNotificationRequest, the operation will + // only be performed if the etag matches that of the Notification. + Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"` + // If present, only send notifications about listed event types. If empty, + // sent notifications for all event types. + EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"` + // A list of additional attributes to attach to each Pub/Sub + // message published for this notification subscription. + CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If present, only apply this notification config to object names that + // begin with this prefix. + ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"` + // Required. The desired content of the Payload. + PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"` +} + +func (x *Notification) Reset() { + *x = Notification{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Notification) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Notification) ProtoMessage() {} + +func (x *Notification) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Notification.ProtoReflect.Descriptor instead. +func (*Notification) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} +} + +func (x *Notification) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Notification) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *Notification) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *Notification) GetEventTypes() []string { + if x != nil { + return x.EventTypes + } + return nil +} + +func (x *Notification) GetCustomAttributes() map[string]string { + if x != nil { + return x.CustomAttributes + } + return nil +} + +func (x *Notification) GetObjectNamePrefix() string { + if x != nil { + return x.ObjectNamePrefix + } + return "" +} + +func (x *Notification) GetPayloadFormat() string { + if x != nil { + return x.PayloadFormat + } + return "" +} + +// Describes the Customer-Supplied Encryption Key mechanism used to store an +// Object's data at rest. +type CustomerEncryption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encryption algorithm. + EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` + // SHA256 hash value of the encryption key. + // In raw bytes format (not base64-encoded). 
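The Notification message above configures Pub/Sub notifications on a bucket; the high-level client wraps it in BucketHandle.AddNotification. A sketch with hypothetical project, bucket, and topic names:

package example

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

// addFinalizeNotification publishes OBJECT_FINALIZE events for objects under
// "logs/" to a Pub/Sub topic. Project, bucket, and topic names are hypothetical.
func addFinalizeNotification(ctx context.Context, client *storage.Client) error {
	n, err := client.Bucket("example-bucket").AddNotification(ctx, &storage.Notification{
		TopicProjectID:   "example-project",
		TopicID:          "example-topic",
		EventTypes:       []string{storage.ObjectFinalizeEvent},
		ObjectNamePrefix: "logs/",
		PayloadFormat:    storage.JSONPayload,
	})
	if err != nil {
		return err
	}
	log.Printf("created notification config %s", n.ID)
	return nil
}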
+ KeySha256Bytes []byte `protobuf:"bytes,3,opt,name=key_sha256_bytes,json=keySha256Bytes,proto3" json:"key_sha256_bytes,omitempty"` +} + +func (x *CustomerEncryption) Reset() { + *x = CustomerEncryption{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomerEncryption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomerEncryption) ProtoMessage() {} + +func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead. +func (*CustomerEncryption) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} +} + +func (x *CustomerEncryption) GetEncryptionAlgorithm() string { + if x != nil { + return x.EncryptionAlgorithm + } + return "" +} + +func (x *CustomerEncryption) GetKeySha256Bytes() []byte { + if x != nil { + return x.KeySha256Bytes + } + return nil +} + +// An object. +type Object struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Immutable. The name of this object. Nearly any sequence of unicode + // characters is valid. See + // [Guidelines](https://cloud.google.com/storage/docs/objects#naming). + // Example: `test.txt` + // The `name` field by itself does not uniquely identify a Cloud Storage + // object. A Cloud Storage object is uniquely identified by the tuple of + // (bucket, object, generation). + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Immutable. The name of the bucket containing this object. + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // The etag of the object. + // If included in the metadata of an update or delete request message, the + // operation will only be performed if the etag matches that of the live + // object. + Etag string `protobuf:"bytes,27,opt,name=etag,proto3" json:"etag,omitempty"` + // Immutable. The content generation of this object. Used for object + // versioning. Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // Output only. The version of the metadata for this generation of this + // object. Used for preconditions and for detecting changes in metadata. A + // metageneration number is only meaningful in the context of a particular + // generation of a particular object. Attempting to set or update this field + // will result in a [FieldViolation][google.rpc.BadRequest.FieldViolation]. + Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` + // Storage class of the object. + StorageClass string `protobuf:"bytes,5,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + // Output only. Content-Length of the object data in bytes, matching + // [https://tools.ietf.org/html/rfc7230#section-3.3.2][RFC 7230 §3.3.2]. 
+ // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + Size int64 `protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` + // Content-Encoding of the object data, matching + // [https://tools.ietf.org/html/rfc7231#section-3.1.2.2][RFC 7231 §3.1.2.2] + ContentEncoding string `protobuf:"bytes,7,opt,name=content_encoding,json=contentEncoding,proto3" json:"content_encoding,omitempty"` + // Content-Disposition of the object data, matching + // [https://tools.ietf.org/html/rfc6266][RFC 6266]. + ContentDisposition string `protobuf:"bytes,8,opt,name=content_disposition,json=contentDisposition,proto3" json:"content_disposition,omitempty"` + // Cache-Control directive for the object data, matching + // [https://tools.ietf.org/html/rfc7234#section-5.2"][RFC 7234 §5.2]. + // If omitted, and the object is accessible to all anonymous users, the + // default will be `public, max-age=3600`. + CacheControl string `protobuf:"bytes,9,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"` + // Access controls on the object. + // If iam_config.uniform_bucket_level_access is enabled on the parent + // bucket, requests to set, read, or modify acl is an error. + Acl []*ObjectAccessControl `protobuf:"bytes,10,rep,name=acl,proto3" json:"acl,omitempty"` + // Content-Language of the object data, matching + // [https://tools.ietf.org/html/rfc7231#section-3.1.3.2][RFC 7231 §3.1.3.2]. + ContentLanguage string `protobuf:"bytes,11,opt,name=content_language,json=contentLanguage,proto3" json:"content_language,omitempty"` + // Output only. The deletion time of the object. Will be returned if and only + // if this version of the object has been deleted. Attempting to set or update + // this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` + // Content-Type of the object data, matching + // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5]. + // If an object is stored without a Content-Type, it is served as + // `application/octet-stream`. + ContentType string `protobuf:"bytes,13,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // Output only. The creation time of the object. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. Number of underlying components that make up this object. + // Components are accumulated by compose operations. Attempting to set or + // update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"` + // Output only. Hashes for the data part of this object. This field is used + // for output only and will be silently ignored if provided in requests. + Checksums *ObjectChecksums `protobuf:"bytes,16,opt,name=checksums,proto3" json:"checksums,omitempty"` + // Output only. The modification time of the object metadata. + // Set initially to object creation time and then updated whenever any + // metadata of the object changes. 
This includes changes made by a requester, + // such as modifying custom metadata, as well as changes made by Cloud Storage + // on behalf of a requester, such as changing the storage class based on an + // Object Lifecycle Configuration. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + // Cloud KMS Key used to encrypt this object, if the object is encrypted by + // such a key. + KmsKey string `protobuf:"bytes,18,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` + // Output only. The time at which the object's storage class was last changed. + // When the object is initially created, it will be set to time_created. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + UpdateStorageClassTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` + // Whether an object is under temporary hold. While this flag is set to true, + // the object is protected against deletion and overwrites. A common use case + // of this flag is regulatory investigations where objects need to be retained + // while the investigation is ongoing. Note that unlike event-based hold, + // temporary hold does not impact retention expiration time of an object. + TemporaryHold bool `protobuf:"varint,20,opt,name=temporary_hold,json=temporaryHold,proto3" json:"temporary_hold,omitempty"` + // A server-determined value that specifies the earliest time that the + // object's retention period expires. + // Note 1: This field is not provided for objects with an active event-based + // hold, since retention expiration is unknown until the hold is removed. + // Note 2: This value can be provided even when temporary hold is set (so that + // the user can reason about policy without having to first unset the + // temporary hold). + RetentionExpireTime *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=retention_expire_time,json=retentionExpireTime,proto3" json:"retention_expire_time,omitempty"` + // User-provided metadata, in key/value pairs. + Metadata map[string]string `protobuf:"bytes,22,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Whether an object is under event-based hold. + // An event-based hold is a way to force the retention of an object until + // after some event occurs. Once the hold is released by explicitly setting + // this field to false, the object will become subject to any bucket-level + // retention policy, except that the retention duration will be calculated + // from the time the event based hold was lifted, rather than the time the + // object was created. + // + // In a WriteObject request, not setting this field implies that the value + // should be taken from the parent bucket's "default_event_based_hold" field. + // In a response, this field will always be set to true or false. + EventBasedHold *bool `protobuf:"varint,23,opt,name=event_based_hold,json=eventBasedHold,proto3,oneof" json:"event_based_hold,omitempty"` + // Output only. The owner of the object. This will always be the uploader of + // the object. 
Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + Owner *Owner `protobuf:"bytes,24,opt,name=owner,proto3" json:"owner,omitempty"` + // Metadata of Customer-Supplied Encryption Key, if the object is encrypted by + // such a key. + CustomerEncryption *CustomerEncryption `protobuf:"bytes,25,opt,name=customer_encryption,json=customerEncryption,proto3" json:"customer_encryption,omitempty"` + // A user-specified timestamp set on an object. + CustomTime *timestamppb.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"` +} + +func (x *Object) Reset() { + *x = Object{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Object) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Object) ProtoMessage() {} + +func (x *Object) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Object.ProtoReflect.Descriptor instead. +func (*Object) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} +} + +func (x *Object) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Object) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *Object) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *Object) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *Object) GetMetageneration() int64 { + if x != nil { + return x.Metageneration + } + return 0 +} + +func (x *Object) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *Object) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *Object) GetContentEncoding() string { + if x != nil { + return x.ContentEncoding + } + return "" +} + +func (x *Object) GetContentDisposition() string { + if x != nil { + return x.ContentDisposition + } + return "" +} + +func (x *Object) GetCacheControl() string { + if x != nil { + return x.CacheControl + } + return "" +} + +func (x *Object) GetAcl() []*ObjectAccessControl { + if x != nil { + return x.Acl + } + return nil +} + +func (x *Object) GetContentLanguage() string { + if x != nil { + return x.ContentLanguage + } + return "" +} + +func (x *Object) GetDeleteTime() *timestamppb.Timestamp { + if x != nil { + return x.DeleteTime + } + return nil +} + +func (x *Object) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *Object) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *Object) GetComponentCount() int32 { + if x != nil { + return x.ComponentCount + } + return 0 +} + +func (x *Object) GetChecksums() *ObjectChecksums { + if x != nil { + return x.Checksums + } + return nil +} + +func (x *Object) GetUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.UpdateTime + } + return nil +} + +func (x *Object) GetKmsKey() string { + if x != nil { + return x.KmsKey + } + return "" +} + +func (x *Object) GetUpdateStorageClassTime() 
*timestamppb.Timestamp { + if x != nil { + return x.UpdateStorageClassTime + } + return nil +} + +func (x *Object) GetTemporaryHold() bool { + if x != nil { + return x.TemporaryHold + } + return false +} + +func (x *Object) GetRetentionExpireTime() *timestamppb.Timestamp { + if x != nil { + return x.RetentionExpireTime + } + return nil +} + +func (x *Object) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Object) GetEventBasedHold() bool { + if x != nil && x.EventBasedHold != nil { + return *x.EventBasedHold + } + return false +} + +func (x *Object) GetOwner() *Owner { + if x != nil { + return x.Owner + } + return nil +} + +func (x *Object) GetCustomerEncryption() *CustomerEncryption { + if x != nil { + return x.CustomerEncryption + } + return nil +} + +func (x *Object) GetCustomTime() *timestamppb.Timestamp { + if x != nil { + return x.CustomTime + } + return nil +} + +// An access-control entry. +type ObjectAccessControl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The access permission for the entity. + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + // The ID of the access-control entry. + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + // The entity holding the permission, in one of the following forms: + // * `user-{userid}` + // * `user-{email}` + // * `group-{groupid}` + // * `group-{email}` + // * `domain-{domain}` + // * `project-{team}-{projectnumber}` + // * `project-{team}-{projectid}` + // * `allUsers` + // * `allAuthenticatedUsers` + // Examples: + // * The user `liz@example.com` would be `user-liz@example.com`. + // * The group `example@googlegroups.com` would be + // `group-example@googlegroups.com`. + // * All members of the Google Apps for Business domain `example.com` would be + // `domain-example.com`. + // For project entities, `project-{team}-{projectnumber}` format will be + // returned on response. + Entity string `protobuf:"bytes,3,opt,name=entity,proto3" json:"entity,omitempty"` + // Output only. The alternative entity format, if exists. For project + // entities, `project-{team}-{projectid}` format will be returned on response. + EntityAlt string `protobuf:"bytes,9,opt,name=entity_alt,json=entityAlt,proto3" json:"entity_alt,omitempty"` + // The ID for the entity, if any. + EntityId string `protobuf:"bytes,4,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` + // The etag of the ObjectAccessControl. + // If included in the metadata of an update or delete request message, the + // operation will only be performed if the etag matches that of the live + // object's ObjectAccessControl. + Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` + // The email address associated with the entity, if any. + Email string `protobuf:"bytes,5,opt,name=email,proto3" json:"email,omitempty"` + // The domain associated with the entity, if any. + Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` + // The project team associated with the entity, if any. 
+ ProjectTeam *ProjectTeam `protobuf:"bytes,7,opt,name=project_team,json=projectTeam,proto3" json:"project_team,omitempty"` +} + +func (x *ObjectAccessControl) Reset() { + *x = ObjectAccessControl{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectAccessControl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectAccessControl) ProtoMessage() {} + +func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead. +func (*ObjectAccessControl) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} +} + +func (x *ObjectAccessControl) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *ObjectAccessControl) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ObjectAccessControl) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *ObjectAccessControl) GetEntityAlt() string { + if x != nil { + return x.EntityAlt + } + return "" +} + +func (x *ObjectAccessControl) GetEntityId() string { + if x != nil { + return x.EntityId + } + return "" +} + +func (x *ObjectAccessControl) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *ObjectAccessControl) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *ObjectAccessControl) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ObjectAccessControl) GetProjectTeam() *ProjectTeam { + if x != nil { + return x.ProjectTeam + } + return nil +} + +// The result of a call to Objects.ListObjects +type ListObjectsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of items. + Objects []*Object `protobuf:"bytes,1,rep,name=objects,proto3" json:"objects,omitempty"` + // The list of prefixes of objects matching-but-not-listed up to and including + // the requested delimiter. + Prefixes []string `protobuf:"bytes,2,rep,name=prefixes,proto3" json:"prefixes,omitempty"` + // The continuation token, used to page through large result sets. Provide + // this value in a subsequent request to return the next page of results. 
+ NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListObjectsResponse) Reset() { + *x = ListObjectsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListObjectsResponse) ProtoMessage() {} + +func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. +func (*ListObjectsResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} +} + +func (x *ListObjectsResponse) GetObjects() []*Object { + if x != nil { + return x.Objects + } + return nil +} + +func (x *ListObjectsResponse) GetPrefixes() []string { + if x != nil { + return x.Prefixes + } + return nil +} + +func (x *ListObjectsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Represents the Viewers, Editors, or Owners of a given project. +type ProjectTeam struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The project number. + ProjectNumber string `protobuf:"bytes,1,opt,name=project_number,json=projectNumber,proto3" json:"project_number,omitempty"` + // The team. + Team string `protobuf:"bytes,2,opt,name=team,proto3" json:"team,omitempty"` +} + +func (x *ProjectTeam) Reset() { + *x = ProjectTeam{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProjectTeam) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProjectTeam) ProtoMessage() {} + +func (x *ProjectTeam) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead. +func (*ProjectTeam) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} +} + +func (x *ProjectTeam) GetProjectNumber() string { + if x != nil { + return x.ProjectNumber + } + return "" +} + +func (x *ProjectTeam) GetTeam() string { + if x != nil { + return x.Team + } + return "" +} + +// A service account, owned by Cloud Storage, which may be used when taking +// action on behalf of a given project, for example to publish Pub/Sub +// notifications or to retrieve security keys. +type ServiceAccount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The email address of the service account.
+ EmailAddress string `protobuf:"bytes,1,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"` +} + +func (x *ServiceAccount) Reset() { + *x = ServiceAccount{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceAccount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceAccount) ProtoMessage() {} + +func (x *ServiceAccount) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead. +func (*ServiceAccount) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} +} + +func (x *ServiceAccount) GetEmailAddress() string { + if x != nil { + return x.EmailAddress + } + return "" +} + +// The owner of a specific resource. +type Owner struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The entity, in the form `user-`*userId*. + Entity string `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"` + // The ID for the entity. + EntityId string `protobuf:"bytes,2,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` +} + +func (x *Owner) Reset() { + *x = Owner{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Owner) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Owner) ProtoMessage() {} + +func (x *Owner) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Owner.ProtoReflect.Descriptor instead. +func (*Owner) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} +} + +func (x *Owner) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *Owner) GetEntityId() string { + if x != nil { + return x.EntityId + } + return "" +} + +// Specifies a requested range of bytes to download. +type ContentRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The starting offset of the object data. + Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + // The ending offset of the object data. + End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` + // The complete length of the object data. 
+ CompleteLength int64 `protobuf:"varint,3,opt,name=complete_length,json=completeLength,proto3" json:"complete_length,omitempty"` +} + +func (x *ContentRange) Reset() { + *x = ContentRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContentRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContentRange) ProtoMessage() {} + +func (x *ContentRange) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContentRange.ProtoReflect.Descriptor instead. +func (*ContentRange) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} +} + +func (x *ContentRange) GetStart() int64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *ContentRange) GetEnd() int64 { + if x != nil { + return x.End + } + return 0 +} + +func (x *ContentRange) GetCompleteLength() int64 { + if x != nil { + return x.CompleteLength + } + return 0 +} + +// Description of a source object for a composition request. +type ComposeObjectRequest_SourceObject struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The source object's name. All source objects must reside in the + // same bucket. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The generation of this object to use as the source. + Generation int64 `protobuf:"varint,2,opt,name=generation,proto3" json:"generation,omitempty"` + // Conditions that must be met for this operation to execute. + ObjectPreconditions *ComposeObjectRequest_SourceObject_ObjectPreconditions `protobuf:"bytes,3,opt,name=object_preconditions,json=objectPreconditions,proto3" json:"object_preconditions,omitempty"` +} + +func (x *ComposeObjectRequest_SourceObject) Reset() { + *x = ComposeObjectRequest_SourceObject{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComposeObjectRequest_SourceObject) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComposeObjectRequest_SourceObject) ProtoMessage() {} + +func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComposeObjectRequest_SourceObject.ProtoReflect.Descriptor instead. 
+func (*ComposeObjectRequest_SourceObject) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *ComposeObjectRequest_SourceObject) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ComposeObjectRequest_SourceObject) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *ComposeObjectRequest_SourceObject) GetObjectPreconditions() *ComposeObjectRequest_SourceObject_ObjectPreconditions { + if x != nil { + return x.ObjectPreconditions + } + return nil +} + +// Preconditions for a source object of a composition request. +type ComposeObjectRequest_SourceObject_ObjectPreconditions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Only perform the composition if the generation of the source object + // that would be used matches this value. If this value and a generation + // are both specified, they must be the same value or the call will fail. + IfGenerationMatch *int64 `protobuf:"varint,1,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` +} + +func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() { + *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {} + +func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComposeObjectRequest_SourceObject_ObjectPreconditions.ProtoReflect.Descriptor instead. +func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0, 0} +} + +func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +// Billing properties of a bucket. +type Bucket_Billing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // When set to true, Requester Pays is enabled for this bucket. 
+ RequesterPays bool `protobuf:"varint,1,opt,name=requester_pays,json=requesterPays,proto3" json:"requester_pays,omitempty"` +} + +func (x *Bucket_Billing) Reset() { + *x = Bucket_Billing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Billing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Billing) ProtoMessage() {} + +func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead. +func (*Bucket_Billing) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 0} +} + +func (x *Bucket_Billing) GetRequesterPays() bool { + if x != nil { + return x.RequesterPays + } + return false +} + +// Cross-Origin Response sharing (CORS) properties for a bucket. +// For more on Cloud Storage and CORS, see +// https://cloud.google.com/storage/docs/cross-origin. +// For more on CORS in general, see https://tools.ietf.org/html/rfc6454. +type Bucket_Cors struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of Origins eligible to receive CORS response headers. See + // [https://tools.ietf.org/html/rfc6454][RFC 6454] for more on origins. + // Note: "*" is permitted in the list of origins, and means "any Origin". + Origin []string `protobuf:"bytes,1,rep,name=origin,proto3" json:"origin,omitempty"` + // The list of HTTP methods on which to include CORS response headers, + // (`GET`, `OPTIONS`, `POST`, etc) Note: "*" is permitted in the list of + // methods, and means "any method". + Method []string `protobuf:"bytes,2,rep,name=method,proto3" json:"method,omitempty"` + // The list of HTTP headers other than the + // [https://www.w3.org/TR/cors/#simple-response-header][simple response + // headers] to give permission for the user-agent to share across domains. + ResponseHeader []string `protobuf:"bytes,3,rep,name=response_header,json=responseHeader,proto3" json:"response_header,omitempty"` + // The value, in seconds, to return in the + // [https://www.w3.org/TR/cors/#access-control-max-age-response-header][Access-Control-Max-Age + // header] used in preflight responses. + MaxAgeSeconds int32 `protobuf:"varint,4,opt,name=max_age_seconds,json=maxAgeSeconds,proto3" json:"max_age_seconds,omitempty"` +} + +func (x *Bucket_Cors) Reset() { + *x = Bucket_Cors{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Cors) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Cors) ProtoMessage() {} + +func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead. 
+func (*Bucket_Cors) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 1} +} + +func (x *Bucket_Cors) GetOrigin() []string { + if x != nil { + return x.Origin + } + return nil +} + +func (x *Bucket_Cors) GetMethod() []string { + if x != nil { + return x.Method + } + return nil +} + +func (x *Bucket_Cors) GetResponseHeader() []string { + if x != nil { + return x.ResponseHeader + } + return nil +} + +func (x *Bucket_Cors) GetMaxAgeSeconds() int32 { + if x != nil { + return x.MaxAgeSeconds + } + return 0 +} + +// Encryption properties of a bucket. +type Bucket_Encryption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Cloud KMS key that will be used to encrypt objects + // inserted into this bucket, if no encryption method is specified. + DefaultKmsKey string `protobuf:"bytes,1,opt,name=default_kms_key,json=defaultKmsKey,proto3" json:"default_kms_key,omitempty"` +} + +func (x *Bucket_Encryption) Reset() { + *x = Bucket_Encryption{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Encryption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Encryption) ProtoMessage() {} + +func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead. +func (*Bucket_Encryption) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 2} +} + +func (x *Bucket_Encryption) GetDefaultKmsKey() string { + if x != nil { + return x.DefaultKmsKey + } + return "" +} + +// Bucket restriction options. +type Bucket_IamConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Bucket restriction options currently enforced on the bucket. + UniformBucketLevelAccess *Bucket_IamConfig_UniformBucketLevelAccess `protobuf:"bytes,1,opt,name=uniform_bucket_level_access,json=uniformBucketLevelAccess,proto3" json:"uniform_bucket_level_access,omitempty"` + // Whether IAM will enforce public access prevention. Valid values are + // "enforced" or "inherited". 
+ PublicAccessPrevention string `protobuf:"bytes,3,opt,name=public_access_prevention,json=publicAccessPrevention,proto3" json:"public_access_prevention,omitempty"` +} + +func (x *Bucket_IamConfig) Reset() { + *x = Bucket_IamConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_IamConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_IamConfig) ProtoMessage() {} + +func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead. +func (*Bucket_IamConfig) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3} +} + +func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess { + if x != nil { + return x.UniformBucketLevelAccess + } + return nil +} + +func (x *Bucket_IamConfig) GetPublicAccessPrevention() string { + if x != nil { + return x.PublicAccessPrevention + } + return "" +} + +// Lifecycle properties of a bucket. +// For more information, see https://cloud.google.com/storage/docs/lifecycle. +type Bucket_Lifecycle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A lifecycle management rule, which is made of an action to take and the + // condition(s) under which the action will be taken. + Rule []*Bucket_Lifecycle_Rule `protobuf:"bytes,1,rep,name=rule,proto3" json:"rule,omitempty"` +} + +func (x *Bucket_Lifecycle) Reset() { + *x = Bucket_Lifecycle{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Lifecycle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Lifecycle) ProtoMessage() {} + +func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead. +func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4} +} + +func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule { + if x != nil { + return x.Rule + } + return nil +} + +// Logging-related properties of a bucket. +type Bucket_Logging struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The destination bucket where the current bucket's logs should be placed, + // using path format (like `projects/123456/buckets/foo`). + LogBucket string `protobuf:"bytes,1,opt,name=log_bucket,json=logBucket,proto3" json:"log_bucket,omitempty"` + // A prefix for log object names. 
+ LogObjectPrefix string `protobuf:"bytes,2,opt,name=log_object_prefix,json=logObjectPrefix,proto3" json:"log_object_prefix,omitempty"` +} + +func (x *Bucket_Logging) Reset() { + *x = Bucket_Logging{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Logging) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Logging) ProtoMessage() {} + +func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead. +func (*Bucket_Logging) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 5} +} + +func (x *Bucket_Logging) GetLogBucket() string { + if x != nil { + return x.LogBucket + } + return "" +} + +func (x *Bucket_Logging) GetLogObjectPrefix() string { + if x != nil { + return x.LogObjectPrefix + } + return "" +} + +// Retention policy properties of a bucket. +type Bucket_RetentionPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Server-determined value that indicates the time from which policy was + // enforced and effective. + EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"` + // Once locked, an object retention policy cannot be modified. + IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"` + // The duration in seconds that objects need to be retained. Retention + // duration must be greater than zero and less than 100 years. Note that + // enforcement of retention periods less than a day is not guaranteed. Such + // periods should only be used for testing purposes. + RetentionPeriod *int64 `protobuf:"varint,3,opt,name=retention_period,json=retentionPeriod,proto3,oneof" json:"retention_period,omitempty"` + // The duration that objects need to be retained. Retention duration must be + // greater than zero and less than 100 years. Note that enforcement of + // retention periods less than a day is not guaranteed. Such periods should + // only be used for testing purposes. Any `nanos` value specified will be + // rounded down to the nearest second. 
+ RetentionDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=retention_duration,json=retentionDuration,proto3" json:"retention_duration,omitempty"` +} + +func (x *Bucket_RetentionPolicy) Reset() { + *x = Bucket_RetentionPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_RetentionPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_RetentionPolicy) ProtoMessage() {} + +func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead. +func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 6} +} + +func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { + if x != nil { + return x.EffectiveTime + } + return nil +} + +func (x *Bucket_RetentionPolicy) GetIsLocked() bool { + if x != nil { + return x.IsLocked + } + return false +} + +func (x *Bucket_RetentionPolicy) GetRetentionPeriod() int64 { + if x != nil && x.RetentionPeriod != nil { + return *x.RetentionPeriod + } + return 0 +} + +func (x *Bucket_RetentionPolicy) GetRetentionDuration() *durationpb.Duration { + if x != nil { + return x.RetentionDuration + } + return nil +} + +// Properties of a bucket related to versioning. +// For more on Cloud Storage versioning, see +// https://cloud.google.com/storage/docs/object-versioning. +type Bucket_Versioning struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // While set to true, versioning is fully enabled for this bucket. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` +} + +func (x *Bucket_Versioning) Reset() { + *x = Bucket_Versioning{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Versioning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Versioning) ProtoMessage() {} + +func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead. +func (*Bucket_Versioning) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 7} +} + +func (x *Bucket_Versioning) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +// Properties of a bucket related to accessing the contents as a static +// website. For more on hosting a static website via Cloud Storage, see +// https://cloud.google.com/storage/docs/hosting-static-website. 
+type Bucket_Website struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If the requested object path is missing, the service will ensure the path + // has a trailing '/', append this suffix, and attempt to retrieve the + // resulting object. This allows the creation of `index.html` + // objects to represent directory pages. + MainPageSuffix string `protobuf:"bytes,1,opt,name=main_page_suffix,json=mainPageSuffix,proto3" json:"main_page_suffix,omitempty"` + // If the requested object path is missing, and any + // `mainPageSuffix` object is missing, if applicable, the service + // will return the named object from this bucket as the content for a + // [https://tools.ietf.org/html/rfc7231#section-6.5.4][404 Not Found] + // result. + NotFoundPage string `protobuf:"bytes,2,opt,name=not_found_page,json=notFoundPage,proto3" json:"not_found_page,omitempty"` +} + +func (x *Bucket_Website) Reset() { + *x = Bucket_Website{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Website) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Website) ProtoMessage() {} + +func (x *Bucket_Website) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead. +func (*Bucket_Website) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 8} +} + +func (x *Bucket_Website) GetMainPageSuffix() string { + if x != nil { + return x.MainPageSuffix + } + return "" +} + +func (x *Bucket_Website) GetNotFoundPage() string { + if x != nil { + return x.NotFoundPage + } + return "" +} + +// Configuration for Custom Dual Regions. It should specify precisely two +// eligible regions within the same Multiregion. More information on regions +// may be found [https://cloud.google.com/storage/docs/locations][here]. +type Bucket_CustomPlacementConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of locations to use for data placement. + DataLocations []string `protobuf:"bytes,1,rep,name=data_locations,json=dataLocations,proto3" json:"data_locations,omitempty"` +} + +func (x *Bucket_CustomPlacementConfig) Reset() { + *x = Bucket_CustomPlacementConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_CustomPlacementConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_CustomPlacementConfig) ProtoMessage() {} + +func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead. 
+func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 9} +} + +func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string { + if x != nil { + return x.DataLocations + } + return nil +} + +// Configuration for a bucket's Autoclass feature. +type Bucket_Autoclass struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables Autoclass. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Output only. Latest instant at which the `enabled` field was set to true + // after being disabled/unconfigured or set to false after being enabled. If + // Autoclass is enabled when the bucket is created, the toggle_time is set + // to the bucket creation time. + ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"` +} + +func (x *Bucket_Autoclass) Reset() { + *x = Bucket_Autoclass{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Autoclass) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Autoclass) ProtoMessage() {} + +func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead. +func (*Bucket_Autoclass) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 10} +} + +func (x *Bucket_Autoclass) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp { + if x != nil { + return x.ToggleTime + } + return nil +} + +// Settings for Uniform Bucket level access. +// See https://cloud.google.com/storage/docs/uniform-bucket-level-access. +type Bucket_IamConfig_UniformBucketLevelAccess struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set, access checks only use bucket-level IAM policies or above. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + // The deadline time for changing + // `iam_config.uniform_bucket_level_access.enabled` from `true` to + // `false`. Mutable until the specified deadline is reached, but not + // afterward. 
+ LockTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"` +} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { + *x = Bucket_IamConfig_UniformBucketLevelAccess{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead. +func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3, 0} +} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamppb.Timestamp { + if x != nil { + return x.LockTime + } + return nil +} + +// A lifecycle Rule, combining an action to take on an object and a +// condition which will trigger that action. +type Bucket_Lifecycle_Rule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The action to take. + Action *Bucket_Lifecycle_Rule_Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` + // The condition(s) under which the action will be taken. + Condition *Bucket_Lifecycle_Rule_Condition `protobuf:"bytes,2,opt,name=condition,proto3" json:"condition,omitempty"` +} + +func (x *Bucket_Lifecycle_Rule) Reset() { + *x = Bucket_Lifecycle_Rule{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Lifecycle_Rule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Lifecycle_Rule) ProtoMessage() {} + +func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[69] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead. +func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0} +} + +func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action { + if x != nil { + return x.Action + } + return nil +} + +func (x *Bucket_Lifecycle_Rule) GetCondition() *Bucket_Lifecycle_Rule_Condition { + if x != nil { + return x.Condition + } + return nil +} + +// An action to take on an object. +type Bucket_Lifecycle_Rule_Action struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of the action. 
Currently, only `Delete`, `SetStorageClass`, and + // `AbortIncompleteMultipartUpload` are supported. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Target storage class. Required iff the type of the action is + // SetStorageClass. + StorageClass string `protobuf:"bytes,2,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` +} + +func (x *Bucket_Lifecycle_Rule_Action) Reset() { + *x = Bucket_Lifecycle_Rule_Action{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Lifecycle_Rule_Action) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {} + +func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead. +func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 0} +} + +func (x *Bucket_Lifecycle_Rule_Action) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Bucket_Lifecycle_Rule_Action) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +// A condition of an object which triggers some action. +type Bucket_Lifecycle_Rule_Condition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Age of an object (in days). This condition is satisfied when an + // object reaches the specified age. + // A value of 0 indicates that all objects immediately match this + // condition. + AgeDays *int32 `protobuf:"varint,1,opt,name=age_days,json=ageDays,proto3,oneof" json:"age_days,omitempty"` + // This condition is satisfied when an object is created before midnight + // of the specified date in UTC. + CreatedBefore *date.Date `protobuf:"bytes,2,opt,name=created_before,json=createdBefore,proto3" json:"created_before,omitempty"` + // Relevant only for versioned objects. If the value is + // `true`, this condition matches live objects; if the value + // is `false`, it matches archived objects. + IsLive *bool `protobuf:"varint,3,opt,name=is_live,json=isLive,proto3,oneof" json:"is_live,omitempty"` + // Relevant only for versioned objects. If the value is N, this + // condition is satisfied when there are at least N versions (including + // the live version) newer than this version of the object. + NumNewerVersions *int32 `protobuf:"varint,4,opt,name=num_newer_versions,json=numNewerVersions,proto3,oneof" json:"num_newer_versions,omitempty"` + // Objects having any of the storage classes specified by this condition + // will be matched. Values include `MULTI_REGIONAL`, `REGIONAL`, + // `NEARLINE`, `COLDLINE`, `STANDARD`, and + // `DURABLE_REDUCED_AVAILABILITY`. + MatchesStorageClass []string `protobuf:"bytes,5,rep,name=matches_storage_class,json=matchesStorageClass,proto3" json:"matches_storage_class,omitempty"` + // Number of days that have elapsed since the custom timestamp set on an + // object. + // The value of the field must be a nonnegative integer. 
+ DaysSinceCustomTime *int32 `protobuf:"varint,7,opt,name=days_since_custom_time,json=daysSinceCustomTime,proto3,oneof" json:"days_since_custom_time,omitempty"` + // An object matches this condition if the custom timestamp set on the + // object is before the specified date in UTC. + CustomTimeBefore *date.Date `protobuf:"bytes,8,opt,name=custom_time_before,json=customTimeBefore,proto3" json:"custom_time_before,omitempty"` + // This condition is relevant only for versioned objects. An object + // version satisfies this condition only if this many days have + // passed since it became noncurrent. The value of the field must be a + // nonnegative integer. If it's zero, the object version will become + // eligible for Lifecycle action as soon as it becomes noncurrent. + DaysSinceNoncurrentTime *int32 `protobuf:"varint,9,opt,name=days_since_noncurrent_time,json=daysSinceNoncurrentTime,proto3,oneof" json:"days_since_noncurrent_time,omitempty"` + // This condition is relevant only for versioned objects. An object + // version satisfies this condition only if it became noncurrent before + // the specified date in UTC. + NoncurrentTimeBefore *date.Date `protobuf:"bytes,10,opt,name=noncurrent_time_before,json=noncurrentTimeBefore,proto3" json:"noncurrent_time_before,omitempty"` + // List of object name prefixes. If any prefix exactly matches the + // beginning of the object name, the condition evaluates to true. + MatchesPrefix []string `protobuf:"bytes,11,rep,name=matches_prefix,json=matchesPrefix,proto3" json:"matches_prefix,omitempty"` + // List of object name suffixes. If any suffix exactly matches the + // end of the object name, the condition evaluates to true. + MatchesSuffix []string `protobuf:"bytes,12,rep,name=matches_suffix,json=matchesSuffix,proto3" json:"matches_suffix,omitempty"` +} + +func (x *Bucket_Lifecycle_Rule_Condition) Reset() { + *x = Bucket_Lifecycle_Rule_Condition{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Lifecycle_Rule_Condition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {} + +func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead.
+func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 1} +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 { + if x != nil && x.AgeDays != nil { + return *x.AgeDays + } + return 0 +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetCreatedBefore() *date.Date { + if x != nil { + return x.CreatedBefore + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetIsLive() bool { + if x != nil && x.IsLive != nil { + return *x.IsLive + } + return false +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetNumNewerVersions() int32 { + if x != nil && x.NumNewerVersions != nil { + return *x.NumNewerVersions + } + return 0 +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesStorageClass() []string { + if x != nil { + return x.MatchesStorageClass + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceCustomTime() int32 { + if x != nil && x.DaysSinceCustomTime != nil { + return *x.DaysSinceCustomTime + } + return 0 +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetCustomTimeBefore() *date.Date { + if x != nil { + return x.CustomTimeBefore + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceNoncurrentTime() int32 { + if x != nil && x.DaysSinceNoncurrentTime != nil { + return *x.DaysSinceNoncurrentTime + } + return 0 +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetNoncurrentTimeBefore() *date.Date { + if x != nil { + return x.NoncurrentTimeBefore + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesPrefix() []string { + if x != nil { + return x.MatchesPrefix + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesSuffix() []string { + if x != nil { + return x.MatchesSuffix + } + return nil +} + +var File_google_storage_v2_storage_proto protoreflect.FileDescriptor + +var file_google_storage_v2_storage_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, + 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, + 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1b, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x02, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x22, 0xa1, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, + 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, + 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, + 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, + 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, + 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x41, 0x63, 0x6c, 0x22, 0x81, 0x02, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 
0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3c, 0x0a, 0x09, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, + 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, + 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, + 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9e, 0x01, 0x0a, + 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xb6, 0x03, + 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, + 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x01, 
0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, + 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, + 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, + 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x5c, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x19, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, + 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x6f, 0x74, 0x69, 
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x95, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, + 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, + 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, + 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0d, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x3c, 0x0a, + 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, + 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, + 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x6d, + 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, + 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, 0x02, 0x0a, + 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 
0x13, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, + 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xc0, 0x04, 0x0a, 0x13, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, + 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, + 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, + 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, + 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, + 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x3f, + 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, + 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, + 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, + 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, + 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0xca, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, + 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, + 0x1b, 0x69, 0x66, 0x5f, 0x6d, 
0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, + 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, + 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, + 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x89, 0x05, 0x0a, + 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, + 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 
0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, + 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, + 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, + 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, + 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, + 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, + 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, + 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 
0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, + 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, + 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, + 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, + 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 
0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, + 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, + 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, + 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, + 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, + 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 
0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, + 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, + 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, + 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, + 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xd3, + 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x3c, + 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, + 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, + 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x34, + 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 
0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, + 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, + 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, + 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, + 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, + 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x22, 0x93, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, + 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 
0x64, 0x6b, 0x6d, + 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, + 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, + 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 
0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, + 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, + 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, + 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, + 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, + 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x53, 0x68, 0x61, 0x32, 
0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, + 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, + 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, + 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, + 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, + 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 
0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, + 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, + 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, + 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, + 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, + 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, + 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, + 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 
0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, + 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, + 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, + 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, + 0x6f, 
0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, + 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, + 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, + 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, + 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x6d, 0x61, 0x6e, 0x61, 
0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, + 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, + 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, + 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f, 0x77, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, 0x4c, + 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, + 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, + 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 
0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, + 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, + 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, + 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, + 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, + 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, + 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, + 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, + 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, + 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, + 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, + 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, + 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, + 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, + 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, + 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, + 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, + 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, + 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, + 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, + 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, + 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, + 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, + 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, + 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, + 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 
0x45, 0x54, 0x10, + 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, + 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, + 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, + 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, + 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, + 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, + 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, + 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, + 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, + 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, + 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, + 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, + 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, + 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, + 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, + 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, + 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, + 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, + 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xf0, 0x1e, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, + 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, + 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, + 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, + 0x6c, 
0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, + 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, + 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, + 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, + 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, + 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, + 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, + 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, + 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 
0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, + 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, + 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, + 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, + 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, + 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, + 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, + 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, + 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, + 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, + 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, + 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 
0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, + 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, + 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, + 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, + 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, + 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, + 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, + 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, + 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, + 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, + 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 
0x73, 0x52, 0x18, + 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, + 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, + 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, + 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, + 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, + 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, + 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, + 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, + 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 
0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, + 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, + 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, + 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, + 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, + 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, + 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, + 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, + 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, + 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, + 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, + 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, + 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, + 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, + 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 
0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, + 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, + 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, + 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0x80, 0x02, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, + 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, + 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, + 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, + 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, + 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, + 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x67, 0x0a, 0x09, 0x41, + 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 
0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, + 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, + 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, + 0x61, 0x6d, 0x22, 0x53, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, + 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, + 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, + 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 
0x09, 0x0a, 0x07, + 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, + 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, + 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, + 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, + 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, + 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, + 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, + 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0xec, + 0x03, 0x0a, 0x0c, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x5f, 
0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x62, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x70, 0xea, 0x41, 0x6d, + 0x0a, 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0x71, 0x0a, + 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, + 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, + 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, + 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, + 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, + 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, + 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 
0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, + 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, + 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, + 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, + 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, + 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, + 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, + 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 
0x6f, 0x6c, 0x64, + 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, + 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x22, + 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, + 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, + 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 
0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, + 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, 0x05, 0x4f, + 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xaf, 0x25, 0x0a, 0x07, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6f, 0x0a, 0x09, 0x47, 0x65, + 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 
0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, + 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8b, 0x01, 0x0a, 0x0c, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, + 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, + 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, 0x69, + 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, + 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, + 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, + 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 
0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x22, 0x60, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x22, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, + 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, + 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, + 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x14, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 
0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, + 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x12, 0x93, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, + 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x96, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, + 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x98, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x33, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, + 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x96, 0x01, 0x0a, 0x11, + 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, + 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, + 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, + 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, + 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, + 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x95, 0x01, 0x0a, + 0x09, 0x47, 0x65, 0x74, 0x4f, 
0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, + 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, + 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, + 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, + 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x22, 0x39, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x60, 0x0a, 0x0b, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x84, 0x01, + 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 
0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, + 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, + 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, + 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 
0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, + 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, + 0x64, 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, + 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, + 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x1d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x77, 0x0a, 0x0d, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 
0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, + 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x22, 0x3f, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, + 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x14, 0x68, 0x6d, + 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, + 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, + 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, + 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, + 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, + 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, + 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xdc, 0x01, 0x0a, + 0x15, 0x63, 0x6f, 0x6d, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, + 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, + 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_storage_v2_storage_proto_rawDescOnce sync.Once + file_google_storage_v2_storage_proto_rawDescData = file_google_storage_v2_storage_proto_rawDesc +) + +func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { + file_google_storage_v2_storage_proto_rawDescOnce.Do(func() { + file_google_storage_v2_storage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_storage_v2_storage_proto_rawDescData) + }) + return file_google_storage_v2_storage_proto_rawDescData +} + +var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 74) +var file_google_storage_v2_storage_proto_goTypes = []interface{}{ + (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values + (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest + (*GetBucketRequest)(nil), // 2: google.storage.v2.GetBucketRequest + (*CreateBucketRequest)(nil), // 3: google.storage.v2.CreateBucketRequest + (*ListBucketsRequest)(nil), // 4: google.storage.v2.ListBucketsRequest + (*ListBucketsResponse)(nil), // 5: google.storage.v2.ListBucketsResponse + (*LockBucketRetentionPolicyRequest)(nil), // 6: google.storage.v2.LockBucketRetentionPolicyRequest + (*UpdateBucketRequest)(nil), // 7: google.storage.v2.UpdateBucketRequest + (*DeleteNotificationRequest)(nil), // 8: google.storage.v2.DeleteNotificationRequest + (*GetNotificationRequest)(nil), // 9: google.storage.v2.GetNotificationRequest + (*CreateNotificationRequest)(nil), // 10: google.storage.v2.CreateNotificationRequest + (*ListNotificationsRequest)(nil), // 11: google.storage.v2.ListNotificationsRequest + (*ListNotificationsResponse)(nil), // 12: google.storage.v2.ListNotificationsResponse + (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest + (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest + (*CancelResumableWriteRequest)(nil), // 15: google.storage.v2.CancelResumableWriteRequest + (*CancelResumableWriteResponse)(nil), // 16: google.storage.v2.CancelResumableWriteResponse + (*ReadObjectRequest)(nil), // 17: 
google.storage.v2.ReadObjectRequest + (*GetObjectRequest)(nil), // 18: google.storage.v2.GetObjectRequest + (*ReadObjectResponse)(nil), // 19: google.storage.v2.ReadObjectResponse + (*WriteObjectSpec)(nil), // 20: google.storage.v2.WriteObjectSpec + (*WriteObjectRequest)(nil), // 21: google.storage.v2.WriteObjectRequest + (*WriteObjectResponse)(nil), // 22: google.storage.v2.WriteObjectResponse + (*ListObjectsRequest)(nil), // 23: google.storage.v2.ListObjectsRequest + (*QueryWriteStatusRequest)(nil), // 24: google.storage.v2.QueryWriteStatusRequest + (*QueryWriteStatusResponse)(nil), // 25: google.storage.v2.QueryWriteStatusResponse + (*RewriteObjectRequest)(nil), // 26: google.storage.v2.RewriteObjectRequest + (*RewriteResponse)(nil), // 27: google.storage.v2.RewriteResponse + (*StartResumableWriteRequest)(nil), // 28: google.storage.v2.StartResumableWriteRequest + (*StartResumableWriteResponse)(nil), // 29: google.storage.v2.StartResumableWriteResponse + (*UpdateObjectRequest)(nil), // 30: google.storage.v2.UpdateObjectRequest + (*GetServiceAccountRequest)(nil), // 31: google.storage.v2.GetServiceAccountRequest + (*CreateHmacKeyRequest)(nil), // 32: google.storage.v2.CreateHmacKeyRequest + (*CreateHmacKeyResponse)(nil), // 33: google.storage.v2.CreateHmacKeyResponse + (*DeleteHmacKeyRequest)(nil), // 34: google.storage.v2.DeleteHmacKeyRequest + (*GetHmacKeyRequest)(nil), // 35: google.storage.v2.GetHmacKeyRequest + (*ListHmacKeysRequest)(nil), // 36: google.storage.v2.ListHmacKeysRequest + (*ListHmacKeysResponse)(nil), // 37: google.storage.v2.ListHmacKeysResponse + (*UpdateHmacKeyRequest)(nil), // 38: google.storage.v2.UpdateHmacKeyRequest + (*CommonObjectRequestParams)(nil), // 39: google.storage.v2.CommonObjectRequestParams + (*ServiceConstants)(nil), // 40: google.storage.v2.ServiceConstants + (*Bucket)(nil), // 41: google.storage.v2.Bucket + (*BucketAccessControl)(nil), // 42: google.storage.v2.BucketAccessControl + (*ChecksummedData)(nil), // 43: google.storage.v2.ChecksummedData + (*ObjectChecksums)(nil), // 44: google.storage.v2.ObjectChecksums + (*HmacKeyMetadata)(nil), // 45: google.storage.v2.HmacKeyMetadata + (*Notification)(nil), // 46: google.storage.v2.Notification + (*CustomerEncryption)(nil), // 47: google.storage.v2.CustomerEncryption + (*Object)(nil), // 48: google.storage.v2.Object + (*ObjectAccessControl)(nil), // 49: google.storage.v2.ObjectAccessControl + (*ListObjectsResponse)(nil), // 50: google.storage.v2.ListObjectsResponse + (*ProjectTeam)(nil), // 51: google.storage.v2.ProjectTeam + (*ServiceAccount)(nil), // 52: google.storage.v2.ServiceAccount + (*Owner)(nil), // 53: google.storage.v2.Owner + (*ContentRange)(nil), // 54: google.storage.v2.ContentRange + (*ComposeObjectRequest_SourceObject)(nil), // 55: google.storage.v2.ComposeObjectRequest.SourceObject + (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 56: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + (*Bucket_Billing)(nil), // 57: google.storage.v2.Bucket.Billing + (*Bucket_Cors)(nil), // 58: google.storage.v2.Bucket.Cors + (*Bucket_Encryption)(nil), // 59: google.storage.v2.Bucket.Encryption + (*Bucket_IamConfig)(nil), // 60: google.storage.v2.Bucket.IamConfig + (*Bucket_Lifecycle)(nil), // 61: google.storage.v2.Bucket.Lifecycle + (*Bucket_Logging)(nil), // 62: google.storage.v2.Bucket.Logging + (*Bucket_RetentionPolicy)(nil), // 63: google.storage.v2.Bucket.RetentionPolicy + (*Bucket_Versioning)(nil), // 64: google.storage.v2.Bucket.Versioning + 
(*Bucket_Website)(nil), // 65: google.storage.v2.Bucket.Website + (*Bucket_CustomPlacementConfig)(nil), // 66: google.storage.v2.Bucket.CustomPlacementConfig + (*Bucket_Autoclass)(nil), // 67: google.storage.v2.Bucket.Autoclass + nil, // 68: google.storage.v2.Bucket.LabelsEntry + (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 69: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + (*Bucket_Lifecycle_Rule)(nil), // 70: google.storage.v2.Bucket.Lifecycle.Rule + (*Bucket_Lifecycle_Rule_Action)(nil), // 71: google.storage.v2.Bucket.Lifecycle.Rule.Action + (*Bucket_Lifecycle_Rule_Condition)(nil), // 72: google.storage.v2.Bucket.Lifecycle.Rule.Condition + nil, // 73: google.storage.v2.Notification.CustomAttributesEntry + nil, // 74: google.storage.v2.Object.MetadataEntry + (*fieldmaskpb.FieldMask)(nil), // 75: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 76: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 77: google.protobuf.Duration + (*date.Date)(nil), // 78: google.type.Date + (*v1.GetIamPolicyRequest)(nil), // 79: google.iam.v1.GetIamPolicyRequest + (*v1.SetIamPolicyRequest)(nil), // 80: google.iam.v1.SetIamPolicyRequest + (*v1.TestIamPermissionsRequest)(nil), // 81: google.iam.v1.TestIamPermissionsRequest + (*emptypb.Empty)(nil), // 82: google.protobuf.Empty + (*v1.Policy)(nil), // 83: google.iam.v1.Policy + (*v1.TestIamPermissionsResponse)(nil), // 84: google.iam.v1.TestIamPermissionsResponse +} +var file_google_storage_v2_storage_proto_depIdxs = []int32{ + 75, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask + 41, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 75, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask + 41, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket + 41, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 75, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask + 46, // 6: google.storage.v2.CreateNotificationRequest.notification:type_name -> google.storage.v2.Notification + 46, // 7: google.storage.v2.ListNotificationsResponse.notifications:type_name -> google.storage.v2.Notification + 48, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object + 55, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject + 39, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 44, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 39, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 39, // 13: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 75, // 14: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 39, // 15: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 75, // 16: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 43, // 17: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 44, // 18: 
google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 54, // 19: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange + 48, // 20: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object + 48, // 21: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object + 20, // 22: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 43, // 23: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 44, // 24: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 39, // 25: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 26: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object + 75, // 27: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask + 39, // 28: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 29: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object + 48, // 30: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object + 39, // 31: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 44, // 32: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 48, // 33: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object + 20, // 34: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 39, // 35: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 44, // 36: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 48, // 37: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object + 75, // 38: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask + 39, // 39: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 45, // 40: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata + 45, // 41: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata + 45, // 42: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata + 75, // 43: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask + 42, // 44: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl + 49, // 45: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl + 61, // 46: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle + 76, // 47: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp + 58, // 48: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors + 76, // 49: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp + 68, // 50: google.storage.v2.Bucket.labels:type_name -> 
google.storage.v2.Bucket.LabelsEntry + 65, // 51: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website + 64, // 52: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning + 62, // 53: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging + 53, // 54: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner + 59, // 55: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption + 57, // 56: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing + 63, // 57: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy + 60, // 58: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig + 66, // 59: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig + 67, // 60: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass + 51, // 61: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 76, // 62: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp + 76, // 63: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp + 73, // 64: google.storage.v2.Notification.custom_attributes:type_name -> google.storage.v2.Notification.CustomAttributesEntry + 49, // 65: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl + 76, // 66: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp + 76, // 67: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp + 44, // 68: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums + 76, // 69: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp + 76, // 70: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp + 76, // 71: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp + 74, // 72: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry + 53, // 73: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner + 47, // 74: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption + 76, // 75: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp + 51, // 76: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 48, // 77: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object + 56, // 78: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + 69, // 79: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + 70, // 80: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule + 76, // 81: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp + 77, // 82: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration + 76, // 83: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp + 76, // 84: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp + 71, 
// 85: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action + 72, // 86: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition + 78, // 87: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date + 78, // 88: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date + 78, // 89: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date + 1, // 90: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest + 2, // 91: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest + 3, // 92: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest + 4, // 93: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest + 6, // 94: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest + 79, // 95: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 80, // 96: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 81, // 97: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 7, // 98: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest + 8, // 99: google.storage.v2.Storage.DeleteNotification:input_type -> google.storage.v2.DeleteNotificationRequest + 9, // 100: google.storage.v2.Storage.GetNotification:input_type -> google.storage.v2.GetNotificationRequest + 10, // 101: google.storage.v2.Storage.CreateNotification:input_type -> google.storage.v2.CreateNotificationRequest + 11, // 102: google.storage.v2.Storage.ListNotifications:input_type -> google.storage.v2.ListNotificationsRequest + 13, // 103: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest + 14, // 104: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest + 15, // 105: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest + 18, // 106: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest + 17, // 107: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest + 30, // 108: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest + 21, // 109: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest + 23, // 110: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest + 26, // 111: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest + 28, // 112: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest + 24, // 113: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest + 31, // 114: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest + 32, // 115: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest + 34, // 116: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest + 35, // 117: google.storage.v2.Storage.GetHmacKey:input_type 
-> google.storage.v2.GetHmacKeyRequest + 36, // 118: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest + 38, // 119: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest + 82, // 120: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty + 41, // 121: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket + 41, // 122: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket + 5, // 123: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse + 41, // 124: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket + 83, // 125: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy + 83, // 126: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy + 84, // 127: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 41, // 128: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket + 82, // 129: google.storage.v2.Storage.DeleteNotification:output_type -> google.protobuf.Empty + 46, // 130: google.storage.v2.Storage.GetNotification:output_type -> google.storage.v2.Notification + 46, // 131: google.storage.v2.Storage.CreateNotification:output_type -> google.storage.v2.Notification + 12, // 132: google.storage.v2.Storage.ListNotifications:output_type -> google.storage.v2.ListNotificationsResponse + 48, // 133: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object + 82, // 134: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty + 16, // 135: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse + 48, // 136: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object + 19, // 137: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse + 48, // 138: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object + 22, // 139: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse + 50, // 140: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse + 27, // 141: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse + 29, // 142: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse + 25, // 143: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse + 52, // 144: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount + 33, // 145: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse + 82, // 146: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty + 45, // 147: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 37, // 148: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse + 45, // 149: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 120, // [120:150] is the sub-list for method output_type + 90, // [90:120] is the sub-list for method input_type + 90, // [90:90] is the sub-list for extension type_name + 90, // [90:90] is the sub-list for extension extendee + 0, // [0:90] is the 
sub-list for field type_name +} + +func init() { file_google_storage_v2_storage_proto_init() } +func file_google_storage_v2_storage_proto_init() { + if File_google_storage_v2_storage_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_storage_v2_storage_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListBucketsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListBucketsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LockBucketRetentionPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteNotificationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetNotificationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateNotificationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListNotificationsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListNotificationsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ComposeObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelResumableWriteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelResumableWriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadObjectResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteObjectSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteObjectResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListObjectsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryWriteStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryWriteStatusResponse); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RewriteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RewriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartResumableWriteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartResumableWriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetServiceAccountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateHmacKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateHmacKeyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteHmacKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetHmacKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListHmacKeysRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListHmacKeysResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*UpdateHmacKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonObjectRequestParams); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceConstants); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BucketAccessControl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChecksummedData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectChecksums); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HmacKeyMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Notification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomerEncryption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Object); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectAccessControl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListObjectsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*ProjectTeam); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceAccount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Owner); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContentRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ComposeObjectRequest_SourceObject); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Billing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Cors); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Encryption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_IamConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Lifecycle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Logging); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_RetentionPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*Bucket_Versioning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Website); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_CustomPlacementConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Autoclass); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Lifecycle_Rule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Lifecycle_Rule_Action); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[16].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{ + (*WriteObjectRequest_UploadId)(nil), + (*WriteObjectRequest_WriteObjectSpec)(nil), + (*WriteObjectRequest_ChecksummedData)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{ + (*WriteObjectResponse_PersistedSize)(nil), + (*WriteObjectResponse_Resource)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []interface{}{ + (*QueryWriteStatusResponse_PersistedSize)(nil), + 
(*QueryWriteStatusResponse_Resource)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[29].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[42].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[43].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[47].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[55].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[62].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[71].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, + NumEnums: 1, + NumMessages: 74, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_storage_v2_storage_proto_goTypes, + DependencyIndexes: file_google_storage_v2_storage_proto_depIdxs, + EnumInfos: file_google_storage_v2_storage_proto_enumTypes, + MessageInfos: file_google_storage_v2_storage_proto_msgTypes, + }.Build() + File_google_storage_v2_storage_proto = out.File + file_google_storage_v2_storage_proto_rawDesc = nil + file_google_storage_v2_storage_proto_goTypes = nil + file_google_storage_v2_storage_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// StorageClient is the client API for Storage service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type StorageClient interface { + // Permanently deletes an empty bucket. + DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Returns metadata for the specified bucket. + GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error) + // Creates a new bucket. + CreateBucket(ctx context.Context, in *CreateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) + // Retrieves a list of buckets for a given project. + ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) + // Locks retention policy on a bucket. + LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) + // Gets the IAM policy for a specified bucket or object. + // The `resource` field in the request should be + // projects/_/buckets/ for a bucket or + // projects/_/buckets//objects/ for an object. + GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) + // Updates an IAM policy for the specified bucket or object. + // The `resource` field in the request should be + // projects/_/buckets/ for a bucket or + // projects/_/buckets//objects/ for an object. + SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) + // Tests a set of permissions on the given bucket or object to see which, if + // any, are held by the caller. 
+ // The `resource` field in the request should be + // projects/_/buckets/ for a bucket or + // projects/_/buckets//objects/ for an object. + TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) + // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. + UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) + // Permanently deletes a notification subscription. + DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // View a notification config. + GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error) + // Creates a notification subscription for a given bucket. + // These notifications, when triggered, publish messages to the specified + // Pub/Sub topics. + // See https://cloud.google.com/storage/docs/pubsub-notifications. + CreateNotification(ctx context.Context, in *CreateNotificationRequest, opts ...grpc.CallOption) (*Notification, error) + // Retrieves a list of notification subscriptions for a given bucket. + ListNotifications(ctx context.Context, in *ListNotificationsRequest, opts ...grpc.CallOption) (*ListNotificationsResponse, error) + // Concatenates a list of existing objects into a new object in the same + // bucket. + ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) + // Deletes an object and its metadata. Deletions are permanent if versioning + // is not enabled for the bucket, or if the `generation` parameter is used. + DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Cancels an in-progress resumable upload. + CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) + // Retrieves an object's metadata. + GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) + // Reads an object's data. + ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error) + // Updates an object's metadata. + // Equivalent to JSON API's storage.objects.patch. + UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) + // Stores a new object and metadata. + // + // An object can be written either in a single message stream or in a + // resumable sequence of message streams. To write using a single stream, + // the client should include in the first message of the stream an + // `WriteObjectSpec` describing the destination bucket, object, and any + // preconditions. Additionally, the final message must set 'finish_write' to + // true, or else it is an error. + // + // For a resumable write, the client should instead call + // `StartResumableWrite()`, populating a `WriteObjectSpec` into that request. + // They should then attach the returned `upload_id` to the first message of + // each following call to `WriteObject`. If the stream is closed before + // finishing the upload (either explicitly by the client or due to a network + // error or an error response from the server), the client should do as + // follows: + // - Check the result Status of the stream, to determine if writing can be + // resumed on this stream or must be restarted from scratch (by calling + // `StartResumableWrite()`). 
The resumable errors are DEADLINE_EXCEEDED, + // INTERNAL, and UNAVAILABLE. For each case, the client should use binary + // exponential backoff before retrying. Additionally, writes can be + // resumed after RESOURCE_EXHAUSTED errors, but only after taking + // appropriate measures, which may include reducing aggregate send rate + // across clients and/or requesting a quota increase for your project. + // - If the call to `WriteObject` returns `ABORTED`, that indicates + // concurrent attempts to update the resumable write, caused either by + // multiple racing clients or by a single client where the previous + // request was timed out on the client side but nonetheless reached the + // server. In this case the client should take steps to prevent further + // concurrent writes (e.g., increase the timeouts, stop using more than + // one process to perform the upload, etc.), and then should follow the + // steps below for resuming the upload. + // - For resumable errors, the client should call `QueryWriteStatus()` and + // then continue writing from the returned `persisted_size`. This may be + // less than the amount of data the client previously sent. Note also that + // it is acceptable to send data starting at an offset earlier than the + // returned `persisted_size`; in this case, the service will skip data at + // offsets that were already persisted (without checking that it matches + // the previously written data), and write only the data starting from the + // persisted offset. This behavior can make client-side handling simpler + // in some cases. + // + // The service will not view the object as complete until the client has + // sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any + // requests on a stream after sending a request with `finish_write` set to + // `true` will cause an error. The client **should** check the response it + // receives to determine how much data the service was able to commit and + // whether the service views the object as complete. + // + // Attempting to resume an already finalized object will result in an OK + // status, with a WriteObjectResponse containing the finalized object's + // metadata. + WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) + // Retrieves a list of objects matching the criteria. + ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) + // Rewrites a source object to a destination object. Optionally overrides + // metadata. + RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error) + // Starts a resumable write. How long the write operation remains valid, and + // what happens when the write operation becomes invalid, are + // service-dependent. + StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error) + // Determines the `persisted_size` for an object that is being written, which + // can then be used as the `write_offset` for the next `Write()` call. + // + // If the object does not exist (i.e., the object has been deleted, or the + // first `Write()` has not yet reached the service), this method returns the + // error `NOT_FOUND`. + // + // The client **may** call `QueryWriteStatus()` at any time to determine how + // much data has been processed for this object. This is useful if the + // client is buffering data and needs to know which data can be safely + // evicted. 
For any sequence of `QueryWriteStatus()` calls for a given + // object name, the sequence of returned `persisted_size` values will be + // non-decreasing. + QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) + // Retrieves the name of a project's Google Cloud Storage service account. + GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) + // Creates a new HMAC key for the given service account. + CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) + // Deletes a given HMAC key. Key must be in an INACTIVE state. + DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Gets an existing HMAC key metadata for the given id. + GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) + // Lists HMAC keys under a given project with the additional filters provided. + ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) + // Updates a given HMAC key state between ACTIVE and INACTIVE. + UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) +} + +type storageClient struct { + cc grpc.ClientConnInterface +} + +func NewStorageClient(cc grpc.ClientConnInterface) StorageClient { + return &storageClient{cc} +} + +func (c *storageClient) DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteBucket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error) { + out := new(Bucket) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetBucket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) CreateBucket(ctx context.Context, in *CreateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) { + out := new(Bucket) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateBucket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) { + out := new(ListBucketsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListBuckets", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) { + out := new(Bucket) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/LockBucketRetentionPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) { + out := new(v1.Policy) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetIamPolicy", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) { + out := new(v1.Policy) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/SetIamPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) { + out := new(v1.TestIamPermissionsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/TestIamPermissions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) { + out := new(Bucket) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateBucket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotification", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error) { + out := new(Notification) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotification", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) CreateNotification(ctx context.Context, in *CreateNotificationRequest, opts ...grpc.CallOption) (*Notification, error) { + out := new(Notification) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotification", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ListNotifications(ctx context.Context, in *ListNotificationsRequest, opts ...grpc.CallOption) (*ListNotificationsResponse, error) { + out := new(ListNotificationsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotifications", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) { + out := new(Object) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ComposeObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) { + out := new(CancelResumableWriteResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CancelResumableWrite", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) { + out := new(Object) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[0], "/google.storage.v2.Storage/ReadObject", opts...) + if err != nil { + return nil, err + } + x := &storageReadObjectClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Storage_ReadObjectClient interface { + Recv() (*ReadObjectResponse, error) + grpc.ClientStream +} + +type storageReadObjectClient struct { + grpc.ClientStream +} + +func (x *storageReadObjectClient) Recv() (*ReadObjectResponse, error) { + m := new(ReadObjectResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storageClient) UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) { + out := new(Object) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[1], "/google.storage.v2.Storage/WriteObject", opts...) + if err != nil { + return nil, err + } + x := &storageWriteObjectClient{stream} + return x, nil +} + +type Storage_WriteObjectClient interface { + Send(*WriteObjectRequest) error + CloseAndRecv() (*WriteObjectResponse, error) + grpc.ClientStream +} + +type storageWriteObjectClient struct { + grpc.ClientStream +} + +func (x *storageWriteObjectClient) Send(m *WriteObjectRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *storageWriteObjectClient) CloseAndRecv() (*WriteObjectResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(WriteObjectResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storageClient) ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) { + out := new(ListObjectsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListObjects", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error) { + out := new(RewriteResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/RewriteObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error) { + out := new(StartResumableWriteResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/StartResumableWrite", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) { + out := new(QueryWriteStatusResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/QueryWriteStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) { + out := new(ServiceAccount) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetServiceAccount", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) { + out := new(CreateHmacKeyResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateHmacKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) { + out := new(HmacKeyMetadata) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetHmacKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) { + out := new(ListHmacKeysResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListHmacKeys", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) { + out := new(HmacKeyMetadata) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateHmacKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// StorageServer is the server API for Storage service. +type StorageServer interface { + // Permanently deletes an empty bucket. + DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) + // Returns metadata for the specified bucket. + GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) + // Creates a new bucket. + CreateBucket(context.Context, *CreateBucketRequest) (*Bucket, error) + // Retrieves a list of buckets for a given project. + ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) + // Locks retention policy on a bucket. + LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) + // Gets the IAM policy for a specified bucket or object. + // The `resource` field in the request should be + // projects/_/buckets/ for a bucket or + // projects/_/buckets//objects/ for an object. + GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) + // Updates an IAM policy for the specified bucket or object. + // The `resource` field in the request should be + // projects/_/buckets/ for a bucket or + // projects/_/buckets//objects/ for an object. 
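// Editorial sketch (not part of the generated diff): the GetIamPolicy/SetIamPolicy
// comments above describe the `resource` naming convention for buckets and objects.
// A minimal client-side call using only the generated API shown in this file is
// sketched below; `cc` and the exact resource string are assumptions on the caller's
// side — the generated comment elides the placeholder path segments, so no concrete
// bucket path is asserted here.
func getBucketPolicySketch(ctx context.Context, cc grpc.ClientConnInterface, resource string) (*v1.Policy, error) {
	client := NewStorageClient(cc)
	// resource is expected to follow the "projects/_/buckets/..." form described above.
	return client.GetIamPolicy(ctx, &v1.GetIamPolicyRequest{Resource: resource})
}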
+ SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) + // Tests a set of permissions on the given bucket or object to see which, if + // any, are held by the caller. + // The `resource` field in the request should be + // projects/_/buckets/ for a bucket or + // projects/_/buckets//objects/ for an object. + TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) + // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. + UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) + // Permanently deletes a notification subscription. + DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) + // View a notification config. + GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) + // Creates a notification subscription for a given bucket. + // These notifications, when triggered, publish messages to the specified + // Pub/Sub topics. + // See https://cloud.google.com/storage/docs/pubsub-notifications. + CreateNotification(context.Context, *CreateNotificationRequest) (*Notification, error) + // Retrieves a list of notification subscriptions for a given bucket. + ListNotifications(context.Context, *ListNotificationsRequest) (*ListNotificationsResponse, error) + // Concatenates a list of existing objects into a new object in the same + // bucket. + ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) + // Deletes an object and its metadata. Deletions are permanent if versioning + // is not enabled for the bucket, or if the `generation` parameter is used. + DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) + // Cancels an in-progress resumable upload. + CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) + // Retrieves an object's metadata. + GetObject(context.Context, *GetObjectRequest) (*Object, error) + // Reads an object's data. + ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error + // Updates an object's metadata. + // Equivalent to JSON API's storage.objects.patch. + UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) + // Stores a new object and metadata. + // + // An object can be written either in a single message stream or in a + // resumable sequence of message streams. To write using a single stream, + // the client should include in the first message of the stream an + // `WriteObjectSpec` describing the destination bucket, object, and any + // preconditions. Additionally, the final message must set 'finish_write' to + // true, or else it is an error. + // + // For a resumable write, the client should instead call + // `StartResumableWrite()`, populating a `WriteObjectSpec` into that request. + // They should then attach the returned `upload_id` to the first message of + // each following call to `WriteObject`. If the stream is closed before + // finishing the upload (either explicitly by the client or due to a network + // error or an error response from the server), the client should do as + // follows: + // - Check the result Status of the stream, to determine if writing can be + // resumed on this stream or must be restarted from scratch (by calling + // `StartResumableWrite()`). The resumable errors are DEADLINE_EXCEEDED, + // INTERNAL, and UNAVAILABLE. For each case, the client should use binary + // exponential backoff before retrying. 
Additionally, writes can be + // resumed after RESOURCE_EXHAUSTED errors, but only after taking + // appropriate measures, which may include reducing aggregate send rate + // across clients and/or requesting a quota increase for your project. + // - If the call to `WriteObject` returns `ABORTED`, that indicates + // concurrent attempts to update the resumable write, caused either by + // multiple racing clients or by a single client where the previous + // request was timed out on the client side but nonetheless reached the + // server. In this case the client should take steps to prevent further + // concurrent writes (e.g., increase the timeouts, stop using more than + // one process to perform the upload, etc.), and then should follow the + // steps below for resuming the upload. + // - For resumable errors, the client should call `QueryWriteStatus()` and + // then continue writing from the returned `persisted_size`. This may be + // less than the amount of data the client previously sent. Note also that + // it is acceptable to send data starting at an offset earlier than the + // returned `persisted_size`; in this case, the service will skip data at + // offsets that were already persisted (without checking that it matches + // the previously written data), and write only the data starting from the + // persisted offset. This behavior can make client-side handling simpler + // in some cases. + // + // The service will not view the object as complete until the client has + // sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any + // requests on a stream after sending a request with `finish_write` set to + // `true` will cause an error. The client **should** check the response it + // receives to determine how much data the service was able to commit and + // whether the service views the object as complete. + // + // Attempting to resume an already finalized object will result in an OK + // status, with a WriteObjectResponse containing the finalized object's + // metadata. + WriteObject(Storage_WriteObjectServer) error + // Retrieves a list of objects matching the criteria. + ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) + // Rewrites a source object to a destination object. Optionally overrides + // metadata. + RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) + // Starts a resumable write. How long the write operation remains valid, and + // what happens when the write operation becomes invalid, are + // service-dependent. + StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) + // Determines the `persisted_size` for an object that is being written, which + // can then be used as the `write_offset` for the next `Write()` call. + // + // If the object does not exist (i.e., the object has been deleted, or the + // first `Write()` has not yet reached the service), this method returns the + // error `NOT_FOUND`. + // + // The client **may** call `QueryWriteStatus()` at any time to determine how + // much data has been processed for this object. This is useful if the + // client is buffering data and needs to know which data can be safely + // evicted. For any sequence of `QueryWriteStatus()` calls for a given + // object name, the sequence of returned `persisted_size` values will be + // non-decreasing. 
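// Editorial sketch (not part of the generated diff): the WriteObject and
// QueryWriteStatus comments above describe the resumable-write protocol in prose.
// The sketch below shows that flow from the client side using only the generated
// types and methods defined in this file (StartResumableWrite, WriteObject,
// QueryWriteStatus and their oneof wrappers). The message field names (UploadId,
// WriteOffset, FinishWrite, Content) are assumed from the storage.proto messages
// generated earlier in this file; error handling and retry/backoff are elided.
func resumableUploadSketch(ctx context.Context, cc grpc.ClientConnInterface, spec *WriteObjectSpec, data []byte) (*Object, error) {
	client := NewStorageClient(cc)

	// 1. Start the resumable write to obtain an upload_id.
	start, err := client.StartResumableWrite(ctx, &StartResumableWriteRequest{WriteObjectSpec: spec})
	if err != nil {
		return nil, err
	}

	// 2. Open the WriteObject stream and attach the upload_id to the first message.
	//    The object is not considered complete until finish_write is set to true.
	stream, err := client.WriteObject(ctx)
	if err != nil {
		return nil, err
	}
	if err := stream.Send(&WriteObjectRequest{
		FirstMessage: &WriteObjectRequest_UploadId{UploadId: start.GetUploadId()},
		WriteOffset:  0,
		Data:         &WriteObjectRequest_ChecksummedData{ChecksummedData: &ChecksummedData{Content: data}},
		FinishWrite:  true,
	}); err != nil {
		return nil, err
	}

	resp, err := stream.CloseAndRecv()
	if err != nil {
		// 3. On a retryable failure, QueryWriteStatus reports how much data was
		//    persisted, so a client could resume from persisted_size rather than
		//    restarting the upload from scratch.
		if st, qErr := client.QueryWriteStatus(ctx, &QueryWriteStatusRequest{UploadId: start.GetUploadId()}); qErr == nil {
			_ = st.GetPersistedSize() // offset to resume writing from
		}
		return nil, err
	}
	return resp.GetResource(), nil
}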
+ QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) + // Retrieves the name of a project's Google Cloud Storage service account. + GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) + // Creates a new HMAC key for the given service account. + CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) + // Deletes a given HMAC key. Key must be in an INACTIVE state. + DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) + // Gets an existing HMAC key metadata for the given id. + GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) + // Lists HMAC keys under a given project with the additional filters provided. + ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) + // Updates a given HMAC key state between ACTIVE and INACTIVE. + UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) +} + +// UnimplementedStorageServer can be embedded to have forward compatible implementations. +type UnimplementedStorageServer struct { +} + +func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteBucket not implemented") +} +func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetBucket not implemented") +} +func (*UnimplementedStorageServer) CreateBucket(context.Context, *CreateBucketRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateBucket not implemented") +} +func (*UnimplementedStorageServer) ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListBuckets not implemented") +} +func (*UnimplementedStorageServer) LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method LockBucketRetentionPolicy not implemented") +} +func (*UnimplementedStorageServer) GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") +} +func (*UnimplementedStorageServer) SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") +} +func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") +} +func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") +} +func (*UnimplementedStorageServer) DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteNotification not implemented") +} +func (*UnimplementedStorageServer) GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotification not implemented") +} +func (*UnimplementedStorageServer) CreateNotification(context.Context, 
*CreateNotificationRequest) (*Notification, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateNotification not implemented") +} +func (*UnimplementedStorageServer) ListNotifications(context.Context, *ListNotificationsRequest) (*ListNotificationsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNotifications not implemented") +} +func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") +} +func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") +} +func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented") +} +func (*UnimplementedStorageServer) GetObject(context.Context, *GetObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented") +} +func (*UnimplementedStorageServer) ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error { + return status.Errorf(codes.Unimplemented, "method ReadObject not implemented") +} +func (*UnimplementedStorageServer) UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateObject not implemented") +} +func (*UnimplementedStorageServer) WriteObject(Storage_WriteObjectServer) error { + return status.Errorf(codes.Unimplemented, "method WriteObject not implemented") +} +func (*UnimplementedStorageServer) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListObjects not implemented") +} +func (*UnimplementedStorageServer) RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RewriteObject not implemented") +} +func (*UnimplementedStorageServer) StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartResumableWrite not implemented") +} +func (*UnimplementedStorageServer) QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method QueryWriteStatus not implemented") +} +func (*UnimplementedStorageServer) GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetServiceAccount not implemented") +} +func (*UnimplementedStorageServer) CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateHmacKey not implemented") +} +func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteHmacKey not implemented") +} +func (*UnimplementedStorageServer) GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetHmacKey not implemented") +} +func (*UnimplementedStorageServer) 
ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListHmacKeys not implemented") +} +func (*UnimplementedStorageServer) UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateHmacKey not implemented") +} + +func RegisterStorageServer(s *grpc.Server, srv StorageServer) { + s.RegisterService(&_Storage_serviceDesc, srv) +} + +func _Storage_DeleteBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteBucketRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).DeleteBucket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/DeleteBucket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).DeleteBucket(ctx, req.(*DeleteBucketRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBucketRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetBucket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetBucket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetBucket(ctx, req.(*GetBucketRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_CreateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateBucketRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).CreateBucket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/CreateBucket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).CreateBucket(ctx, req.(*CreateBucketRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ListBuckets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBucketsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ListBuckets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ListBuckets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ListBuckets(ctx, req.(*ListBucketsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_LockBucketRetentionPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LockBucketRetentionPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).LockBucketRetentionPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.storage.v2.Storage/LockBucketRetentionPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).LockBucketRetentionPolicy(ctx, req.(*LockBucketRetentionPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1.GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetIamPolicy(ctx, req.(*v1.GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1.SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).SetIamPolicy(ctx, req.(*v1.SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1.TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).TestIamPermissions(ctx, req.(*v1.TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_UpdateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateBucketRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).UpdateBucket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/UpdateBucket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).UpdateBucket(ctx, req.(*UpdateBucketRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_DeleteNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNotificationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).DeleteNotification(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/DeleteNotification", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(StorageServer).DeleteNotification(ctx, req.(*DeleteNotificationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetNotification(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetNotification", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetNotification(ctx, req.(*GetNotificationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_CreateNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNotificationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).CreateNotification(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/CreateNotification", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).CreateNotification(ctx, req.(*CreateNotificationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ListNotifications_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ListNotifications(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ListNotifications", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ListNotifications(ctx, req.(*ListNotificationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ComposeObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ComposeObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ComposeObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ComposeObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ComposeObject(ctx, req.(*ComposeObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_DeleteObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).DeleteObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/DeleteObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).DeleteObject(ctx, req.(*DeleteObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_CancelResumableWrite_Handler(srv interface{}, 
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelResumableWriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).CancelResumableWrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/CancelResumableWrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).CancelResumableWrite(ctx, req.(*CancelResumableWriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetObject(ctx, req.(*GetObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ReadObject_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadObjectRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageServer).ReadObject(m, &storageReadObjectServer{stream}) +} + +type Storage_ReadObjectServer interface { + Send(*ReadObjectResponse) error + grpc.ServerStream +} + +type storageReadObjectServer struct { + grpc.ServerStream +} + +func (x *storageReadObjectServer) Send(m *ReadObjectResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Storage_UpdateObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).UpdateObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/UpdateObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).UpdateObject(ctx, req.(*UpdateObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_WriteObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StorageServer).WriteObject(&storageWriteObjectServer{stream}) +} + +type Storage_WriteObjectServer interface { + SendAndClose(*WriteObjectResponse) error + Recv() (*WriteObjectRequest, error) + grpc.ServerStream +} + +type storageWriteObjectServer struct { + grpc.ServerStream +} + +func (x *storageWriteObjectServer) SendAndClose(m *WriteObjectResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *storageWriteObjectServer) Recv() (*WriteObjectRequest, error) { + m := new(WriteObjectRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Storage_ListObjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListObjectsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ListObjects(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + 
FullMethod: "/google.storage.v2.Storage/ListObjects", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ListObjects(ctx, req.(*ListObjectsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_RewriteObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RewriteObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).RewriteObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/RewriteObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).RewriteObject(ctx, req.(*RewriteObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_StartResumableWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartResumableWriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).StartResumableWrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/StartResumableWrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).StartResumableWrite(ctx, req.(*StartResumableWriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_QueryWriteStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryWriteStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).QueryWriteStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/QueryWriteStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).QueryWriteStatus(ctx, req.(*QueryWriteStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetServiceAccount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetServiceAccount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetServiceAccount(ctx, req.(*GetServiceAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_CreateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateHmacKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).CreateHmacKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/CreateHmacKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(StorageServer).CreateHmacKey(ctx, req.(*CreateHmacKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_DeleteHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteHmacKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).DeleteHmacKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/DeleteHmacKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).DeleteHmacKey(ctx, req.(*DeleteHmacKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetHmacKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetHmacKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetHmacKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetHmacKey(ctx, req.(*GetHmacKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ListHmacKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListHmacKeysRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ListHmacKeys(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ListHmacKeys", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ListHmacKeys(ctx, req.(*ListHmacKeysRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_UpdateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateHmacKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).UpdateHmacKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/UpdateHmacKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).UpdateHmacKey(ctx, req.(*UpdateHmacKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Storage_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.storage.v2.Storage", + HandlerType: (*StorageServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DeleteBucket", + Handler: _Storage_DeleteBucket_Handler, + }, + { + MethodName: "GetBucket", + Handler: _Storage_GetBucket_Handler, + }, + { + MethodName: "CreateBucket", + Handler: _Storage_CreateBucket_Handler, + }, + { + MethodName: "ListBuckets", + Handler: _Storage_ListBuckets_Handler, + }, + { + MethodName: "LockBucketRetentionPolicy", + Handler: _Storage_LockBucketRetentionPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _Storage_GetIamPolicy_Handler, + }, + { + MethodName: "SetIamPolicy", + Handler: _Storage_SetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: 
_Storage_TestIamPermissions_Handler, + }, + { + MethodName: "UpdateBucket", + Handler: _Storage_UpdateBucket_Handler, + }, + { + MethodName: "DeleteNotification", + Handler: _Storage_DeleteNotification_Handler, + }, + { + MethodName: "GetNotification", + Handler: _Storage_GetNotification_Handler, + }, + { + MethodName: "CreateNotification", + Handler: _Storage_CreateNotification_Handler, + }, + { + MethodName: "ListNotifications", + Handler: _Storage_ListNotifications_Handler, + }, + { + MethodName: "ComposeObject", + Handler: _Storage_ComposeObject_Handler, + }, + { + MethodName: "DeleteObject", + Handler: _Storage_DeleteObject_Handler, + }, + { + MethodName: "CancelResumableWrite", + Handler: _Storage_CancelResumableWrite_Handler, + }, + { + MethodName: "GetObject", + Handler: _Storage_GetObject_Handler, + }, + { + MethodName: "UpdateObject", + Handler: _Storage_UpdateObject_Handler, + }, + { + MethodName: "ListObjects", + Handler: _Storage_ListObjects_Handler, + }, + { + MethodName: "RewriteObject", + Handler: _Storage_RewriteObject_Handler, + }, + { + MethodName: "StartResumableWrite", + Handler: _Storage_StartResumableWrite_Handler, + }, + { + MethodName: "QueryWriteStatus", + Handler: _Storage_QueryWriteStatus_Handler, + }, + { + MethodName: "GetServiceAccount", + Handler: _Storage_GetServiceAccount_Handler, + }, + { + MethodName: "CreateHmacKey", + Handler: _Storage_CreateHmacKey_Handler, + }, + { + MethodName: "DeleteHmacKey", + Handler: _Storage_DeleteHmacKey_Handler, + }, + { + MethodName: "GetHmacKey", + Handler: _Storage_GetHmacKey_Handler, + }, + { + MethodName: "ListHmacKeys", + Handler: _Storage_ListHmacKeys_Handler, + }, + { + MethodName: "UpdateHmacKey", + Handler: _Storage_UpdateHmacKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ReadObject", + Handler: _Storage_ReadObject_Handler, + ServerStreams: true, + }, + { + StreamName: "WriteObject", + Handler: _Storage_WriteObject_Handler, + ClientStreams: true, + }, + }, + Metadata: "google/storage/v2/storage.proto", +} diff --git a/vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go b/vendor/cloud.google.com/go/storage/internal/apiv2/version.go similarity index 62% rename from vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go rename to vendor/cloud.google.com/go/storage/internal/apiv2/version.go index 7df7a1d7155a2..15920f3f63a45 100644 --- a/vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/version.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the cloud.google.com/go import, won't actually become part of -// the resultant binary. -// +build modhack +// Code generated by gapicgen. DO NOT EDIT. package storage -// Necessary for safely adding multi-module repo. 
See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "cloud.google.com/go" +import "cloud.google.com/go/storage/internal" + +func init() { + versionClient = internal.Version +} diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go new file mode 100644 index 0000000000000..a08cb7cabc62d --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. +const Version = "1.29.0" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index e755f197de802..810d64285d0ce 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -16,22 +16,131 @@ package storage import ( "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strings" "cloud.google.com/go/internal" + "cloud.google.com/go/internal/version" + sinternal "cloud.google.com/go/storage/internal" + "github.com/google/uuid" gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) -// runWithRetry calls the function until it returns nil or a non-retryable error, or -// the context is done. -func runWithRetry(ctx context.Context, call func() error) error { - return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { +var defaultRetry *retryConfig = &retryConfig{} +var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version) + +// run determines whether a retry is necessary based on the config and +// idempotency information. It then calls the function with or without retries +// as appropriate, using the configured settings. 
+func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool, setHeader func(string, int)) error { + attempts := 1 + invocationID := uuid.New().String() + + if retry == nil { + retry = defaultRetry + } + if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever { + setHeader(invocationID, attempts) + return call() + } + bo := gax.Backoff{} + if retry.backoff != nil { + bo.Multiplier = retry.backoff.Multiplier + bo.Initial = retry.backoff.Initial + bo.Max = retry.backoff.Max + } + var errorFunc func(err error) bool = ShouldRetry + if retry.shouldRetry != nil { + errorFunc = retry.shouldRetry + } + + return internal.Retry(ctx, bo, func() (stop bool, err error) { + setHeader(invocationID, attempts) err = call() - if err == nil { - return true, nil + attempts++ + return !errorFunc(err), err + }) +} + +func setRetryHeaderHTTP(req interface{ Header() http.Header }) func(string, int) { + return func(invocationID string, attempts int) { + if req == nil { + return } - if shouldRetry(err) { - return false, nil + header := req.Header() + invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") + header.Set("x-goog-api-client", xGoogHeader) + } +} + +// TODO: Implement method setting header via context for gRPC +func setRetryHeaderGRPC(_ context.Context) func(string, int) { + return func(_ string, _ int) { + return + } +} + +// ShouldRetry returns true if an error is retryable, based on best practice +// guidance from GCS. See +// https://cloud.google.com/storage/docs/retry-strategy#go for more information +// on what errors are considered retryable. +// +// If you would like to customize retryable errors, use the WithErrorFunc to +// supply a RetryOption to your library calls. For example, to retry additional +// errors, you can write a custom func that wraps ShouldRetry and also specifies +// additional errors that should return true. +func ShouldRetry(err error) bool { + if err == nil { + return false + } + if errors.Is(err, io.ErrUnexpectedEOF) { + return true + } + + switch e := err.(type) { + case *net.OpError: + if strings.Contains(e.Error(), "use of closed network connection") { + // TODO: check against net.ErrClosed (go 1.16+) instead of string + return true } - return true, err - }) + case *googleapi.Error: + // Retry on 408, 429, and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. + return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case *url.Error: + // Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall). + // Unfortunately the error type is unexported, so we resort to string + // matching. + retriable := []string{"connection refused", "connection reset"} + for _, s := range retriable { + if strings.Contains(e.Error(), s) { + return true + } + } + case interface{ Temporary() bool }: + if e.Temporary() { + return true + } + } + // HTTP 429, 502, 503, and 504 all map to gRPC UNAVAILABLE per + // https://grpc.github.io/grpc/core/md_doc_http-grpc-status-mapping.html. + // + // This is only necessary for the experimental gRPC-based media operations. 
+ if st, ok := status.FromError(err); ok && st.Code() == codes.Unavailable { + return true + } + // Unwrap is only supported in go1.13.x+ + if e, ok := err.(interface{ Unwrap() error }); ok { + return ShouldRetry(e.Unwrap()) + } + return false } diff --git a/vendor/cloud.google.com/go/storage/not_go110.go b/vendor/cloud.google.com/go/storage/not_go110.go deleted file mode 100644 index 66fa45bea2fed..0000000000000 --- a/vendor/cloud.google.com/go/storage/not_go110.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !go1.10 - -package storage - -import ( - "net/url" - "strings" - - "google.golang.org/api/googleapi" -) - -func shouldRetry(err error) bool { - switch e := err.(type) { - case *googleapi.Error: - // Retry on 429 and 5xx, according to - // https://cloud.google.com/storage/docs/exponential-backoff. - return e.Code == 429 || (e.Code >= 500 && e.Code < 600) - case *url.Error: - // Retry on REFUSED_STREAM. - // Unfortunately the error type is unexported, so we resort to string - // matching. - return strings.Contains(e.Error(), "REFUSED_STREAM") - case interface{ Temporary() bool }: - return e.Temporary() - default: - return false - } -} diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go index 84619b6d58c7a..614feb7b6daf5 100644 --- a/vendor/cloud.google.com/go/storage/notifications.go +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -21,6 +21,7 @@ import ( "regexp" "cloud.google.com/go/internal/trace" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" raw "google.golang.org/api/storage/v1" ) @@ -91,6 +92,30 @@ func toNotification(rn *raw.Notification) *Notification { return n } +func toNotificationFromProto(pbn *storagepb.Notification) *Notification { + n := &Notification{ + ID: pbn.GetName(), + EventTypes: pbn.GetEventTypes(), + ObjectNamePrefix: pbn.GetObjectNamePrefix(), + CustomAttributes: pbn.GetCustomAttributes(), + PayloadFormat: pbn.GetPayloadFormat(), + } + n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic) + return n +} + +func toProtoNotification(n *Notification) *storagepb.Notification { + return &storagepb.Notification{ + Name: n.ID, + Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", + n.TopicProjectID, n.TopicID), + EventTypes: n.EventTypes, + ObjectNamePrefix: n.ObjectNamePrefix, + CustomAttributes: n.CustomAttributes, + PayloadFormat: n.PayloadFormat, + } +} + var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)") // parseNotificationTopic extracts the project and topic IDs from from the full @@ -132,16 +157,10 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re if n.TopicID == "" { return nil, errors.New("storage: AddNotification: missing TopicID") } - call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n)) - setClientHeader(call.Header()) - if b.userProject != "" { - 
call.UserProject(b.userProject) - } - rn, err := call.Context(ctx).Do() - if err != nil { - return nil, err - } - return toNotification(rn), nil + + opts := makeStorageOpts(false, b.retry, b.userProject) + ret, err = b.c.tc.CreateNotification(ctx, b.name, n, opts...) + return ret, err } // Notifications returns all the Notifications configured for this bucket, as a map @@ -150,20 +169,9 @@ func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notific ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications") defer func() { trace.EndSpan(ctx, err) }() - call := b.c.raw.Notifications.List(b.name) - setClientHeader(call.Header()) - if b.userProject != "" { - call.UserProject(b.userProject) - } - var res *raw.Notifications - err = runWithRetry(ctx, func() error { - res, err = call.Context(ctx).Do() - return err - }) - if err != nil { - return nil, err - } - return notificationsToMap(res.Items), nil + opts := makeStorageOpts(true, b.retry, b.userProject) + n, err = b.c.tc.ListNotifications(ctx, b.name, opts...) + return n, err } func notificationsToMap(rns []*raw.Notification) map[string]*Notification { @@ -174,15 +182,19 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification { return m } +func notificationsToMapFromProto(ns []*storagepb.Notification) map[string]*Notification { + m := map[string]*Notification{} + for _, n := range ns { + m[n.Name] = toNotificationFromProto(n) + } + return m +} + // DeleteNotification deletes the notification with the given ID. func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") defer func() { trace.EndSpan(ctx, err) }() - call := b.c.raw.Notifications.Delete(b.name, id) - setClientHeader(call.Header()) - if b.userProject != "" { - call.UserProject(b.userProject) - } - return call.Context(ctx).Do() + opts := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.DeleteNotification(ctx, b.name, id, opts...) } diff --git a/vendor/cloud.google.com/go/storage/post_policy_v4.go b/vendor/cloud.google.com/go/storage/post_policy_v4.go index b9df7db958135..2961aca206281 100644 --- a/vendor/cloud.google.com/go/storage/post_policy_v4.go +++ b/vendor/cloud.google.com/go/storage/post_policy_v4.go @@ -52,22 +52,38 @@ type PostPolicyV4Options struct { // Exactly one of PrivateKey or SignBytes must be non-nil. PrivateKey []byte - // SignBytes is a function for implementing custom signing. For example, if + // SignBytes is a function for implementing custom signing. + // + // Deprecated: Use SignRawBytes. If both SignBytes and SignRawBytes are defined, + // SignBytes will be ignored. + // This SignBytes function expects the bytes it receives to be hashed, while + // SignRawBytes accepts the raw bytes without hashing, allowing more flexibility. + // Add the following to the top of your signing function to hash the bytes + // to use SignRawBytes instead: + // shaSum := sha256.Sum256(bytes) + // bytes = shaSum[:] + // + SignBytes func(hashBytes []byte) (signature []byte, err error) + + // SignRawBytes is a function for implementing custom signing. 
For example, if // your application is running on Google App Engine, you can use // appengine's internal signing function: - // ctx := appengine.NewContext(request) - // acc, _ := appengine.ServiceAccount(ctx) - // url, err := SignedURL("bucket", "object", &SignedURLOptions{ - // GoogleAccessID: acc, - // SignBytes: func(b []byte) ([]byte, error) { - // _, signedBytes, err := appengine.SignBytes(ctx, b) - // return signedBytes, err - // }, - // // etc. - // }) + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // &PostPolicyV4Options{ + // GoogleAccessID: acc, + // SignRawBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) // - // Exactly one of PrivateKey or SignBytes must be non-nil. - SignBytes func(hashBytes []byte) (signature []byte, err error) + // SignRawBytes is equivalent to the SignBytes field on SignedURLOptions; + // that is, you may use the same signing function for the two. + // + // Exactly one of PrivateKey or SignRawBytes must be non-nil. + SignRawBytes func(bytes []byte) (signature []byte, err error) // Expires is the expiration time on the signed URL. // It must be a time in the future. @@ -96,6 +112,23 @@ type PostPolicyV4Options struct { // a 4XX status code, back with the message describing the problem. // Optional. Conditions []PostPolicyV4Condition + + shouldHashSignBytes bool +} + +func (opts *PostPolicyV4Options) clone() *PostPolicyV4Options { + return &PostPolicyV4Options{ + GoogleAccessID: opts.GoogleAccessID, + PrivateKey: opts.PrivateKey, + SignBytes: opts.SignBytes, + SignRawBytes: opts.SignRawBytes, + Expires: opts.Expires, + Style: opts.Style, + Insecure: opts.Insecure, + Fields: opts.Fields, + Conditions: opts.Conditions, + shouldHashSignBytes: opts.shouldHashSignBytes, + } } // PolicyV4Fields describes the attributes for a PostPolicyV4 request. @@ -206,6 +239,8 @@ func conditionStatusCodeOnSuccess(statusCode int) PostPolicyV4Condition { // GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. // The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. +// If initializing a Storage Client, instead use the Bucket.GenerateSignedPostPolicyV4 +// method which uses the Client's credentials to handle authentication. 
func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { if bucket == "" { return nil, errors.New("storage: bucket must be non-empty") @@ -220,20 +255,22 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options var signingFn func(hashedBytes []byte) ([]byte, error) switch { - case opts.SignBytes != nil: + case opts.SignRawBytes != nil: + signingFn = opts.SignRawBytes + case opts.shouldHashSignBytes: signingFn = opts.SignBytes - case len(opts.PrivateKey) != 0: parsedRSAPrivKey, err := parseKey(opts.PrivateKey) if err != nil { return nil, err } - signingFn = func(hashedBytes []byte) ([]byte, error) { - return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, hashedBytes) + signingFn = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, sum[:]) } default: - return nil, errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + return nil, errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set") } var descFields PolicyV4Fields @@ -249,10 +286,16 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options conds := make([]PostPolicyV4Condition, len(opts.Conditions)) copy(conds, opts.Conditions) conds = append(conds, - conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), - conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), + // These are ordered lexicographically. Technically the order doesn't matter + // for creating the policy, but we use this order to match the + // cross-language conformance tests for this feature. &singleValueCondition{"acl", descFields.ACL}, &singleValueCondition{"cache-control", descFields.CacheControl}, + &singleValueCondition{"content-disposition", descFields.ContentDisposition}, + &singleValueCondition{"content-encoding", descFields.ContentEncoding}, + &singleValueCondition{"content-type", descFields.ContentType}, + conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), + conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), ) YYYYMMDD := now.Format(yearMonthDay) @@ -261,8 +304,12 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options "x-goog-date": now.Format(iso8601), "x-goog-credential": opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", "x-goog-algorithm": "GOOG4-RSA-SHA256", - "success_action_redirect": descFields.RedirectToURLOnSuccess, "acl": descFields.ACL, + "cache-control": descFields.CacheControl, + "content-disposition": descFields.ContentDisposition, + "content-encoding": descFields.ContentEncoding, + "content-type": descFields.ContentType, + "success_action_redirect": descFields.RedirectToURLOnSuccess, } for key, value := range descFields.Metadata { conds = append(conds, &singleValueCondition{key, value}) @@ -293,14 +340,22 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options "expiration": opts.Expires.Format(time.RFC3339), }) if err != nil { - return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %v", err) + return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %w", err) } b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON) - shaSum := sha256.Sum256([]byte(b64Policy)) - signature, err := signingFn(shaSum[:]) - if err != nil { - return nil, err + var signature []byte + var signErr error + + if opts.shouldHashSignBytes { + // SignBytes expects hashed bytes as 
input instead of raw bytes, so we hash them + shaSum := sha256.Sum256([]byte(b64Policy)) + signature, signErr = signingFn(shaSum[:]) + } else { + signature, signErr = signingFn([]byte(b64Policy)) + } + if signErr != nil { + return nil, signErr } policyFields["policy"] = b64Policy @@ -338,15 +393,16 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options // validatePostPolicyV4Options checks that: // * GoogleAccessID is set -// * either but not both PrivateKey and SignBytes are set or nil, but not both -// * Expires, the deadline is not in the past +// * either PrivateKey or SignRawBytes/SignBytes is set, but not both +// * the deadline set in Expires is not in the past // * if Style is not set, it'll use PathStyle +// * sets shouldHashSignBytes to true if opts.SignBytes should be used func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error { if opts == nil || opts.GoogleAccessID == "" { return errors.New("storage: missing required GoogleAccessID") } - if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil; privBlank == signBlank { - return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil && opts.SignRawBytes == nil; privBlank == signBlank { + return errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set") } if opts.Expires.Before(now) { return errors.New("storage: expecting Expires to be in the future") @@ -354,6 +410,9 @@ func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error if opts.Style == nil { opts.Style = PathStyle() } + if opts.SignRawBytes == nil && opts.SignBytes != nil { + opts.shouldHashSignBytes = true + } return nil } diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index d64f5ec778c3d..46487d2b77de6 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -16,20 +16,15 @@ package storage import ( "context" - "errors" "fmt" "hash/crc32" "io" "io/ioutil" "net/http" - "net/url" - "reflect" - "strconv" "strings" "time" "cloud.google.com/go/internal/trace" - "google.golang.org/api/googleapi" ) var crc32cTable = crc32.MakeTable(crc32.Castagnoli) @@ -106,194 +101,31 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) return nil, err } } - u := &url.URL{ - Scheme: o.c.scheme, - Host: o.c.readHost, - Path: fmt.Sprintf("/%s/%s", o.bucket, o.object), - } - verb := "GET" - if length == 0 { - verb = "HEAD" - } - req, err := http.NewRequest(verb, u.String(), nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - if o.userProject != "" { - req.Header.Set("X-Goog-User-Project", o.userProject) - } - if o.readCompressed { - req.Header.Set("Accept-Encoding", "gzip") - } - if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { - return nil, err - } - - gen := o.gen - - // Define a function that initiates a Read with offset and length, assuming we - // have already read seen bytes. - reopen := func(seen int64) (*http.Response, error) { - start := offset + seen - if length < 0 && start < 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d", start)) - } else if length < 0 && start > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) - } else if length > 0 { - // The end character isn't affected by how many bytes we've seen. 
- req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1)) - } - // We wait to assign conditions here because the generation number can change in between reopen() runs. - req.URL.RawQuery = conditionsQuery(gen, o.conds) - var res *http.Response - err = runWithRetry(ctx, func() error { - res, err = o.c.hc.Do(req) - if err != nil { - return err - } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return ErrObjectNotExist - } - if res.StatusCode < 200 || res.StatusCode > 299 { - body, _ := ioutil.ReadAll(res.Body) - res.Body.Close() - return &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - Body: string(body), - } - } - - partialContentNotSatisfied := - !decompressiveTranscoding(res) && - start > 0 && length != 0 && - res.StatusCode != http.StatusPartialContent - - if partialContentNotSatisfied { - res.Body.Close() - return errors.New("storage: partial request not satisfied") - } - - // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves - // back the whole file regardless of the range count passed in as per: - // https://cloud.google.com/storage/docs/transcoding#range, - // thus we have to manually move the body forward by seen bytes. - if decompressiveTranscoding(res) && seen > 0 { - _, _ = io.CopyN(ioutil.Discard, res.Body, seen) - } - - // If a generation hasn't been specified, and this is the first response we get, let's record the - // generation. In future requests we'll use this generation as a precondition to avoid data races. - if gen < 0 && res.Header.Get("X-Goog-Generation") != "" { - gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) - if err != nil { - return err - } - gen = gen64 - } - return nil - }) - if err != nil { - return nil, err - } - return res, nil - } - - res, err := reopen(0) - if err != nil { - return nil, err - } - var ( - size int64 // total size of object, even if a range was requested. - checkCRC bool - crc uint32 - startOffset int64 // non-zero if range request. - ) - if res.StatusCode == http.StatusPartialContent { - cr := strings.TrimSpace(res.Header.Get("Content-Range")) - if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } - size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } - - dashIndex := strings.Index(cr, "-") - if dashIndex >= 0 { - startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err) - } - } - } else { - size = res.ContentLength - // Check the CRC iff all of the following hold: - // - We asked for content (length != 0). - // - We got all the content (status != PartialContent). - // - The server sent a CRC header. - // - The Go http stack did not uncompress the file. - // - We were not served compressed data that was uncompressed on download. - // The problem with the last two cases is that the CRC will not match -- GCS - // computes it on the compressed contents, but we compute it on the - // uncompressed contents. 
- if length != 0 && !res.Uncompressed && !uncompressedByServer(res) { - crc, checkCRC = parseCRC32c(res) - } - } - remain := res.ContentLength - body := res.Body - if length == 0 { - remain = 0 - body.Close() - body = emptyBody - } - var metaGen int64 - if res.Header.Get("X-Goog-Metageneration") != "" { - metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) - if err != nil { - return nil, err - } + opts := makeStorageOpts(true, o.retry, o.userProject) + + params := &newRangeReaderParams{ + bucket: o.bucket, + object: o.object, + gen: o.gen, + offset: offset, + length: length, + encryptionKey: o.encryptionKey, + conds: o.conds, + readCompressed: o.readCompressed, } - var lm time.Time - if res.Header.Get("Last-Modified") != "" { - lm, err = http.ParseTime(res.Header.Get("Last-Modified")) - if err != nil { - return nil, err - } - } + r, err = o.c.tc.NewRangeReader(ctx, params, opts...) - attrs := ReaderObjectAttrs{ - Size: size, - ContentType: res.Header.Get("Content-Type"), - ContentEncoding: res.Header.Get("Content-Encoding"), - CacheControl: res.Header.Get("Cache-Control"), - LastModified: lm, - StartOffset: startOffset, - Generation: gen, - Metageneration: metaGen, - } - return &Reader{ - Attrs: attrs, - body: body, - size: size, - remain: remain, - wantCRC: crc, - checkCRC: checkCRC, - reopen: reopen, - }, nil + return r, err } // decompressiveTranscoding returns true if the request was served decompressed // and different than its original storage form. This happens when the "Content-Encoding" // header is "gzip". // See: -// * https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip -// * https://github.com/googleapis/google-cloud-go/issues/1800 +// - https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip +// - https://github.com/googleapis/google-cloud-go/issues/1800 func decompressiveTranscoding(res *http.Response) bool { // Decompressive Transcoding. return res.Header.Get("Content-Encoding") == "gzip" || @@ -320,6 +152,34 @@ func parseCRC32c(res *http.Response) (uint32, bool) { return 0, false } +// setConditionsHeaders sets precondition request headers for downloads +// using the XML API. It assumes that the conditions have been validated. +func setConditionsHeaders(headers http.Header, conds *Conditions) error { + if conds == nil { + return nil + } + if conds.MetagenerationMatch != 0 { + headers.Set("x-goog-if-metageneration-match", fmt.Sprint(conds.MetagenerationMatch)) + } + switch { + case conds.GenerationMatch != 0: + headers.Set("x-goog-if-generation-match", fmt.Sprint(conds.GenerationMatch)) + case conds.DoesNotExist: + headers.Set("x-goog-if-generation-match", "0") + } + return nil +} + +// Wrap a request to look similar to an apiary library request, in order to +// be used by run(). +type readerRequestWrapper struct { + req *http.Request +} + +func (w *readerRequestWrapper) Header() http.Header { + return w.req.Header +} + var emptyBody = ioutil.NopCloser(strings.NewReader("")) // Reader reads a Cloud Storage object. @@ -330,21 +190,21 @@ var emptyBody = ioutil.NopCloser(strings.NewReader("")) // is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding. type Reader struct { Attrs ReaderObjectAttrs - body io.ReadCloser seen, remain, size int64 checkCRC bool // should we check the CRC? wantCRC uint32 // the CRC32c value the server sent in the header gotCRC uint32 // running crc - reopen func(seen int64) (*http.Response, error) + + reader io.ReadCloser } // Close closes the Reader. 
It must be called when done reading. func (r *Reader) Close() error { - return r.body.Close() + return r.reader.Close() } func (r *Reader) Read(p []byte) (int, error) { - n, err := r.readWithRetry(p) + n, err := r.reader.Read(p) if r.remain != -1 { r.remain -= int64(n) } @@ -363,35 +223,6 @@ func (r *Reader) Read(p []byte) (int, error) { return n, err } -func (r *Reader) readWithRetry(p []byte) (int, error) { - n := 0 - for len(p[n:]) > 0 { - m, err := r.body.Read(p[n:]) - n += m - r.seen += int64(m) - if !shouldRetryRead(err) { - return n, err - } - // Read failed, but we will try again. Send a ranged read request that takes - // into account the number of bytes we've already seen. - res, err := r.reopen(r.seen) - if err != nil { - // reopen already retries - return n, err - } - r.body.Close() - r.body = res.Body - } - return n, nil -} - -func shouldRetryRead(err error) bool { - if err == nil { - return false - } - return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2") -} - // Size returns the size of the object in bytes. // The returned value is always the same and is not affected by // calls to Read or Close. diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 20d9518a42da1..7fc3fc4cb9b3a 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -40,11 +40,20 @@ import ( "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" - "cloud.google.com/go/internal/version" + "cloud.google.com/go/storage/internal" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "github.com/googleapis/gax-go/v2" + "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" raw "google.golang.org/api/storage/v1" + "google.golang.org/api/transport" htransport "google.golang.org/api/transport/http" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/known/fieldmaskpb" + "google.golang.org/protobuf/types/known/timestamppb" ) // Methods which can be used in signed URLs. @@ -55,11 +64,14 @@ var ( ErrBucketNotExist = errors.New("storage: bucket doesn't exist") // ErrObjectNotExist indicates that the object does not exist. ErrObjectNotExist = errors.New("storage: object doesn't exist") + // errMethodNotSupported indicates that the method called is not currently supported by the client. + // TODO: Export this error when launching the transport-agnostic client. + errMethodNotSupported = errors.New("storage: method is not currently supported") // errMethodNotValid indicates that given HTTP method is not valid. errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys()) ) -var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", version.Repo) +var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", internal.Version) const ( // ScopeFullControl grants permissions to manage your @@ -73,12 +85,19 @@ const ( // ScopeReadWrite grants permissions to manage your // data in Google Cloud Storage. ScopeReadWrite = raw.DevstorageReadWriteScope -) -var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo) + // aes256Algorithm is the AES256 encryption algorithm used with the + // Customer-Supplied Encryption Keys feature. 
+ aes256Algorithm = "AES256" + + // defaultGen indicates the latest object generation by default, + // using a negative value. + defaultGen = int64(-1) +) +// TODO: remove this once header with invocation ID is applied to all methods. func setClientHeader(headers http.Header) { - headers.Set("x-goog-api-client", xGoogHeader) + headers.Set("x-goog-api-client", xGoogDefaultHeader) } // Client is a client for interacting with Google Cloud Storage. @@ -90,62 +109,127 @@ type Client struct { raw *raw.Service // Scheme describes the scheme under the current host. scheme string - // EnvHost is the host set on the STORAGE_EMULATOR_HOST variable. - envHost string // ReadHost is the default host used on the reader. readHost string + // May be nil. + creds *google.Credentials + retry *retryConfig + + // tc is the transport-agnostic client implemented with either gRPC or HTTP. + tc storageClient + // useGRPC flags whether the client uses gRPC. This is needed while the + // integration piece is only partially complete. + // TODO: remove before merging to main. + useGRPC bool } // NewClient creates a new Google Cloud Storage client. -// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. +// The default scope is ScopeFullControl. To use a different scope, like +// ScopeReadOnly, use option.WithScopes. +// +// Clients should be reused instead of created as needed. The methods of Client +// are safe for concurrent use by multiple goroutines. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - var host, readHost, scheme string - if host = os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { - scheme = "https" - readHost = "storage.googleapis.com" + // Use the experimental gRPC client if the env var is set. + // This is an experimental API and not intended for public use. + if withGRPC := os.Getenv("STORAGE_USE_GRPC"); withGRPC != "" { + return newGRPCClient(ctx, opts...) + } + + var creds *google.Credentials + // In general, it is recommended to use raw.NewService instead of htransport.NewClient + // since raw.NewService configures the correct default endpoints when initializing the + // internal http client. However, in our case, "NewRangeReader" in reader.go needs to + // access the http client directly to make requests, so we create the client manually + // here so it can be re-used by both reader.go and raw.NewService. This means we need to + // manually configure the default endpoint options on the http client. Furthermore, we + // need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints. + if host := os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { // Prepend default options to avoid overriding options passed by the user. - opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl), option.WithUserAgent(userAgent)}, opts...) + opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, opts...) + + opts = append(opts, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/")) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/")) + + // Don't error out here. The user may have passed in their own HTTP + // client which does not auth with ADC or other common conventions. + c, err := transport.Creds(ctx, opts...) 
+ if err == nil { + creds = c + opts = append(opts, internaloption.WithCredentials(creds)) + } } else { - scheme = "http" - readHost = host + var hostURL *url.URL + + if strings.Contains(host, "://") { + h, err := url.Parse(host) + if err != nil { + return nil, err + } + hostURL = h + } else { + // Add scheme for user if not supplied in STORAGE_EMULATOR_HOST + // URL is only parsed correctly if it has a scheme, so we build it ourselves + hostURL = &url.URL{Scheme: "http", Host: host} + } + + hostURL.Path = "storage/v1/" + endpoint := hostURL.String() + // Append the emulator host as default endpoint for the user opts = append([]option.ClientOption{option.WithoutAuthentication()}, opts...) + + opts = append(opts, internaloption.WithDefaultEndpoint(endpoint)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(endpoint)) } + // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. hc, ep, err := htransport.NewClient(ctx, opts...) if err != nil { - return nil, fmt.Errorf("dialing: %v", err) + return nil, fmt.Errorf("dialing: %w", err) } - rawService, err := raw.NewService(ctx, option.WithHTTPClient(hc)) + // RawService should be created with the chosen endpoint to take account of user override. + rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc)) if err != nil { - return nil, fmt.Errorf("storage client: %v", err) + return nil, fmt.Errorf("storage client: %w", err) } - if ep == "" { - // Override the default value for BasePath from the raw client. - // TODO: remove when the raw client uses this endpoint as its default (~end of 2020) - rawService.BasePath = "https://storage.googleapis.com/storage/v1/" - } else { - // If the endpoint has been set explicitly, use this for the BasePath - // as well as readHost - rawService.BasePath = ep - u, err := url.Parse(ep) - if err != nil { - return nil, fmt.Errorf("supplied endpoint %v is not valid: %v", ep, err) - } - readHost = u.Host + // Update readHost and scheme with the chosen endpoint. + u, err := url.Parse(ep) + if err != nil { + return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) + } + + tc, err := newHTTPStorageClient(ctx, withClientOptions(opts...)) + if err != nil { + return nil, fmt.Errorf("storage: %w", err) } return &Client{ hc: hc, raw: rawService, - scheme: scheme, - envHost: host, - readHost: readHost, + scheme: u.Scheme, + readHost: u.Host, + creds: creds, + tc: tc, }, nil } +// newGRPCClient creates a new Storage client that initializes a gRPC-based +// client. Calls that have not been implemented in gRPC will panic. +// +// This is an experimental API and not intended for public use. +func newGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + opts = append(defaultGRPCOptions(), opts...) + tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...)) + if err != nil { + return nil, err + } + + return &Client{tc: tc, useGRPC: true}, nil +} + // Close closes the Client. // // Close need not be called at program exit. @@ -153,6 +237,10 @@ func (c *Client) Close() error { // Set fields to nil so that subsequent uses will panic. 
c.hc = nil c.raw = nil + c.creds = nil + if c.tc != nil { + return c.tc.Close() + } return nil } @@ -193,10 +281,18 @@ type bucketBoundHostname struct { } func (s pathStyle) host(bucket string) string { + if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { + return stripScheme(host) + } + return "storage.googleapis.com" } func (s virtualHostedStyle) host(bucket string) string { + if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { + return bucket + "." + stripScheme(host) + } + return bucket + ".storage.googleapis.com" } @@ -244,6 +340,14 @@ func BucketBoundHostname(hostname string) URLStyle { return bucketBoundHostname{hostname: hostname} } +// Strips the scheme from a host if it contains it +func stripScheme(host string) string { + if strings.Contains(host, "://") { + host = strings.SplitN(host, "://", 2)[1] + } + return host +} + // SignedURLOptions allows you to restrict the access to the signed URL. type SignedURLOptions struct { // GoogleAccessID represents the authorizer of the signed URL generation. @@ -336,6 +440,23 @@ type SignedURLOptions struct { Scheme SigningScheme } +func (opts *SignedURLOptions) clone() *SignedURLOptions { + return &SignedURLOptions{ + GoogleAccessID: opts.GoogleAccessID, + SignBytes: opts.SignBytes, + PrivateKey: opts.PrivateKey, + Method: opts.Method, + Expires: opts.Expires, + ContentType: opts.ContentType, + Headers: opts.Headers, + QueryParameters: opts.QueryParameters, + MD5: opts.MD5, + Style: opts.Style, + Insecure: opts.Insecure, + Scheme: opts.Scheme, + } +} + var ( tabRegex = regexp.MustCompile(`[\t]+`) // I was tempted to call this spacex. :) @@ -349,7 +470,7 @@ var ( ) // v2SanitizeHeaders applies the specifications for canonical extension headers at -// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +// https://cloud.google.com/storage/docs/access-control/signed-urls-v2#about-canonical-extension-headers func v2SanitizeHeaders(hdrs []string) []string { headerMap := map[string][]string{} for _, hdr := range hdrs { @@ -397,16 +518,16 @@ func v2SanitizeHeaders(hdrs []string) []string { } // v4SanitizeHeaders applies the specifications for canonical extension headers -// at https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +// at https://cloud.google.com/storage/docs/authentication/canonical-requests#about-headers. // // V4 does a couple things differently from V2: -// - Headers get sorted by key, instead of by key:value. We do this in -// signedURLV4. -// - There's no canonical regexp: we simply split headers on :. -// - We don't exclude canonical headers. -// - We replace leading and trailing spaces in header values, like v2, but also -// all intermediate space duplicates get stripped. That is, there's only ever -// a single consecutive space. +// - Headers get sorted by key, instead of by key:value. We do this in +// signedURLV4. +// - There's no canonical regexp: we simply split headers on :. +// - We don't exclude canonical headers. +// - We replace leading and trailing spaces in header values, like v2, but also +// all intermediate space duplicates get stripped. That is, there's only ever +// a single consecutive space. func v4SanitizeHeaders(hdrs []string) []string { headerMap := map[string][]string{} for _, hdr := range hdrs { @@ -449,11 +570,13 @@ func v4SanitizeHeaders(hdrs []string) []string { return sanitizedHeaders } -// SignedURL returns a URL for the specified object. 
Signed URLs allow -// the users access to a restricted resource for a limited time without having a -// Google account or signing in. For more information about the signed -// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs. -func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { +// SignedURL returns a URL for the specified object. Signed URLs allow anyone +// access to a restricted resource for a limited time without needing a +// Google account or signing in. For more information about signed URLs, see +// https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication +// If initializing a Storage Client, instead use the Bucket.SignedURL method +// which uses the Client's credentials to handle authentication. +func SignedURL(bucket, object string, opts *SignedURLOptions) (string, error) { now := utcNow() if err := validateOptions(opts, now); err != nil { return "", err @@ -462,13 +585,13 @@ func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { switch opts.Scheme { case SigningSchemeV2: opts.Headers = v2SanitizeHeaders(opts.Headers) - return signedURLV2(bucket, name, opts) + return signedURLV2(bucket, object, opts) case SigningSchemeV4: opts.Headers = v4SanitizeHeaders(opts.Headers) - return signedURLV4(bucket, name, opts, now) + return signedURLV4(bucket, object, opts, now) default: // SigningSchemeDefault opts.Headers = v2SanitizeHeaders(opts.Headers) - return signedURLV2(bucket, name, opts) + return signedURLV2(bucket, object, opts) } } @@ -583,8 +706,10 @@ func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (st for k, v := range opts.QueryParameters { canonicalQueryString[k] = append(canonicalQueryString[k], v...) } - - fmt.Fprintf(buf, "%s\n", canonicalQueryString.Encode()) + // url.Values.Encode escaping is correct, except that a space must be replaced + // by `%20` rather than `+`. + escapedQuery := strings.Replace(canonicalQueryString.Encode(), "+", "%20", -1) + fmt.Fprintf(buf, "%s\n", escapedQuery) // Fill in the hostname based on the desired URL style. u.Host = opts.Style.host(bucket) @@ -721,7 +846,7 @@ func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) { } encoded := base64.StdEncoding.EncodeToString(b) u.Scheme = "https" - u.Host = "storage.googleapis.com" + u.Host = PathStyle().host(bucket) q := u.Query() q.Set("GoogleAccessId", opts.GoogleAccessID) q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) @@ -742,6 +867,7 @@ type ObjectHandle struct { encryptionKey []byte // AES-256 key userProject string // for requester-pays buckets readCompressed bool // Accept-Encoding: gzip + retry *retryConfig } // ACL provides access to the object's access control list. 
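The hunk above rewrites the SignedURL documentation, switches the V2 host to the path-style helper, and fixes V4 query escaping (spaces become %20 instead of +). A minimal sketch of calling the package-level helper with the V4 scheme; the bucket, object, and key file names are hypothetical placeholders, not values from this change:

    package main

    import (
        "fmt"
        "log"
        "os"
        "time"

        "cloud.google.com/go/storage"
    )

    func main() {
        // Hypothetical service-account PEM key loaded by the caller.
        pem, err := os.ReadFile("service-account.pem")
        if err != nil {
            log.Fatal(err)
        }

        // V4 signing, the SigningSchemeV4 branch touched in this hunk.
        u, err := storage.SignedURL("my-bucket", "my-object", &storage.SignedURLOptions{
            GoogleAccessID: "signer@my-project.iam.gserviceaccount.com",
            PrivateKey:     pem,
            Method:         "GET",
            Expires:        time.Now().Add(15 * time.Minute),
            Scheme:         storage.SigningSchemeV4,
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(u)
    }

As the updated doc comment notes, callers that already hold a Client can instead use the Bucket.SignedURL method, which derives GoogleAccessID and signing material from the client's credentials.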
@@ -793,30 +919,12 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error if err := o.validate(); err != nil { return nil, err } - call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx) - if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { - return nil, err - } - if o.userProject != "" { - call.UserProject(o.userProject) - } - if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - setClientHeader(call.Header()) - err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(obj), nil + opts := makeStorageOpts(true, o.retry, o.userProject) + return o.c.tc.GetObject(ctx, o.bucket, o.object, o.gen, o.encryptionKey, o.conds, opts...) } -// Update updates an object with the provided attributes. -// All zero-value attributes are ignored. +// Update updates an object with the provided attributes. See +// ObjectAttrsToUpdate docs for details on treatment of zero values. // ErrObjectNotExist will be returned if the object is not found. func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update") @@ -825,90 +933,9 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) ( if err := o.validate(); err != nil { return nil, err } - var attrs ObjectAttrs - // Lists of fields to send, and set to null, in the JSON. - var forceSendFields, nullFields []string - if uattrs.ContentType != nil { - attrs.ContentType = optional.ToString(uattrs.ContentType) - // For ContentType, sending the empty string is a no-op. - // Instead we send a null. - if attrs.ContentType == "" { - nullFields = append(nullFields, "ContentType") - } else { - forceSendFields = append(forceSendFields, "ContentType") - } - } - if uattrs.ContentLanguage != nil { - attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) - // For ContentLanguage it's an error to send the empty string. - // Instead we send a null. - if attrs.ContentLanguage == "" { - nullFields = append(nullFields, "ContentLanguage") - } else { - forceSendFields = append(forceSendFields, "ContentLanguage") - } - } - if uattrs.ContentEncoding != nil { - attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) - forceSendFields = append(forceSendFields, "ContentEncoding") - } - if uattrs.ContentDisposition != nil { - attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) - forceSendFields = append(forceSendFields, "ContentDisposition") - } - if uattrs.CacheControl != nil { - attrs.CacheControl = optional.ToString(uattrs.CacheControl) - forceSendFields = append(forceSendFields, "CacheControl") - } - if uattrs.EventBasedHold != nil { - attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold) - forceSendFields = append(forceSendFields, "EventBasedHold") - } - if uattrs.TemporaryHold != nil { - attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) - forceSendFields = append(forceSendFields, "TemporaryHold") - } - if uattrs.Metadata != nil { - attrs.Metadata = uattrs.Metadata - if len(attrs.Metadata) == 0 { - // Sending the empty map is a no-op. We send null instead. 
- nullFields = append(nullFields, "Metadata") - } else { - forceSendFields = append(forceSendFields, "Metadata") - } - } - if uattrs.ACL != nil { - attrs.ACL = uattrs.ACL - // It's an error to attempt to delete the ACL, so - // we don't append to nullFields here. - forceSendFields = append(forceSendFields, "Acl") - } - rawObj := attrs.toRawObject(o.bucket) - rawObj.ForceSendFields = forceSendFields - rawObj.NullFields = nullFields - call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx) - if err := applyConds("Update", o.gen, o.conds, call); err != nil { - return nil, err - } - if o.userProject != "" { - call.UserProject(o.userProject) - } - if uattrs.PredefinedACL != "" { - call.PredefinedAcl(uattrs.PredefinedACL) - } - if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - setClientHeader(call.Header()) - err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(obj), nil + isIdempotent := o.conds != nil && o.conds.MetagenerationMatch != 0 + opts := makeStorageOpts(isIdempotent, o.retry, o.userProject) + return o.c.tc.UpdateObject(ctx, o.bucket, o.object, &uattrs, o.gen, o.encryptionKey, o.conds, opts...) } // BucketName returns the name of the bucket. @@ -923,15 +950,17 @@ func (o *ObjectHandle) ObjectName() string { // ObjectAttrsToUpdate is used to update the attributes of an object. // Only fields set to non-nil values will be updated. -// Set a field to its zero value to delete it. +// For all fields except CustomTime, set the field to its zero value to delete +// it. CustomTime cannot be deleted or changed to an earlier time once set. // // For example, to change ContentType and delete ContentEncoding and // Metadata, use -// ObjectAttrsToUpdate{ -// ContentType: "text/html", -// ContentEncoding: "", -// Metadata: map[string]string{}, -// } +// +// ObjectAttrsToUpdate{ +// ContentType: "text/html", +// ContentEncoding: "", +// Metadata: map[string]string{}, +// } type ObjectAttrsToUpdate struct { EventBasedHold optional.Bool TemporaryHold optional.Bool @@ -940,7 +969,8 @@ type ObjectAttrsToUpdate struct { ContentEncoding optional.String ContentDisposition optional.String CacheControl optional.String - Metadata map[string]string // set to map[string]string{} to delete + CustomTime time.Time // Cannot be deleted or backdated from its current value. + Metadata map[string]string // Set to map[string]string{} to delete. ACL []ACLRule // If not empty, applies a predefined set of access controls. ACL must be nil. @@ -953,25 +983,11 @@ func (o *ObjectHandle) Delete(ctx context.Context) error { if err := o.validate(); err != nil { return err } - call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx) - if err := applyConds("Delete", o.gen, o.conds, call); err != nil { - return err - } - if o.userProject != "" { - call.UserProject(o.userProject) - } - // Encryption doesn't apply to Delete. - setClientHeader(call.Header()) - err := runWithRetry(ctx, func() error { return call.Do() }) - switch e := err.(type) { - case nil: - return nil - case *googleapi.Error: - if e.Code == http.StatusNotFound { - return ErrObjectNotExist - } - } - return err + // Delete is idempotent if GenerationMatch or Generation have been passed in. 
+ // The default generation is negative to get the latest version of the object. + isIdempotent := (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 + opts := makeStorageOpts(isIdempotent, o.retry, o.userProject) + return o.c.tc.DeleteObject(ctx, o.bucket, o.object, o.gen, o.conds, opts...) } // ReadCompressed when true causes the read to happen without decompressing. @@ -994,6 +1010,9 @@ func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle { // attribute is specified, the content type will be automatically sniffed // using net/http.DetectContentType. // +// Note that each Writer allocates an internal buffer of size Writer.ChunkSize. +// See the ChunkSize docs for more information. +// // It is the caller's responsibility to call Close when writing is done. To // stop writing without saving the data, cancel the context. func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { @@ -1047,6 +1066,10 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { if !o.RetentionExpirationTime.IsZero() { ret = o.RetentionExpirationTime.Format(time.RFC3339) } + var ct string + if !o.CustomTime.IsZero() { + ct = o.CustomTime.Format(time.RFC3339) + } return &raw.Object{ Bucket: bucket, Name: o.Name, @@ -1061,9 +1084,86 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { StorageClass: o.StorageClass, Acl: toRawObjectACL(o.ACL), Metadata: o.Metadata, + CustomTime: ct, + } +} + +// toProtoObject copies the editable attributes from o to the proto library's Object type. +func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object { + // For now, there are only globally unique buckets, and "_" is the alias + // project ID for such buckets. If the bucket is not provided, like in the + // destination ObjectAttrs of a Copy, do not attempt to format it. + if b != "" { + b = bucketResourceName(globalProjectAlias, b) + } + + return &storagepb.Object{ + Bucket: b, + Name: o.Name, + EventBasedHold: proto.Bool(o.EventBasedHold), + TemporaryHold: o.TemporaryHold, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ContentDisposition: o.ContentDisposition, + StorageClass: o.StorageClass, + Acl: toProtoObjectACL(o.ACL), + Metadata: o.Metadata, + CreateTime: toProtoTimestamp(o.Created), + CustomTime: toProtoTimestamp(o.CustomTime), + DeleteTime: toProtoTimestamp(o.Deleted), + RetentionExpireTime: toProtoTimestamp(o.RetentionExpirationTime), + UpdateTime: toProtoTimestamp(o.Updated), + KmsKey: o.KMSKeyName, + Generation: o.Generation, + Size: o.Size, } } +// toProtoObject copies the attributes to update from uattrs to the proto library's Object type. 
+func (uattrs *ObjectAttrsToUpdate) toProtoObject(bucket, object string) *storagepb.Object { + o := &storagepb.Object{ + Name: object, + Bucket: bucket, + } + if uattrs == nil { + return o + } + + if uattrs.EventBasedHold != nil { + o.EventBasedHold = proto.Bool(optional.ToBool(uattrs.EventBasedHold)) + } + if uattrs.TemporaryHold != nil { + o.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) + } + if uattrs.ContentType != nil { + o.ContentType = optional.ToString(uattrs.ContentType) + } + if uattrs.ContentLanguage != nil { + o.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + } + if uattrs.ContentEncoding != nil { + o.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + } + if uattrs.ContentDisposition != nil { + o.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + } + if uattrs.CacheControl != nil { + o.CacheControl = optional.ToString(uattrs.CacheControl) + } + if !uattrs.CustomTime.IsZero() { + o.CustomTime = toProtoTimestamp(uattrs.CustomTime) + } + if uattrs.ACL != nil { + o.Acl = toProtoObjectACL(uattrs.ACL) + } + + // TODO(cathyo): Handle metadata. Pending b/230510191. + + return o +} + // ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object. type ObjectAttrs struct { // Bucket is the name of the bucket containing this GCS object. @@ -1135,6 +1235,9 @@ type ObjectAttrs struct { // Composer. In those cases, if the SendCRC32C field in the Writer or Composer // is set to is true, the uploaded data is rejected if its CRC32C hash does // not match this field. + // + // Note: For a Writer, SendCRC32C must be set to true BEFORE the first call to + // Writer.Write() in order to send the checksum. CRC32C uint32 // MediaLink is an URL to the object's content. This field is read-only. @@ -1142,6 +1245,10 @@ type ObjectAttrs struct { // Metadata represents user-provided metadata, in key/value pairs. // It can be nil if no metadata is provided. + // + // For object downloads using Reader, metadata keys are sent as headers. + // Therefore, avoid setting metadata keys using characters that are not valid + // for headers. See https://www.rfc-editor.org/rfc/rfc7230#section-3.2.6. Metadata map[string]string // Generation is the generation number of the object's content. @@ -1199,6 +1306,20 @@ type ObjectAttrs struct { // Etag is the HTTP/1.1 Entity tag for the object. // This field is read-only. Etag string + + // A user-specified timestamp which can be applied to an object. This is + // typically set in order to use the CustomTimeBefore and DaysSinceCustomTime + // LifecycleConditions to manage object lifecycles. + // + // CustomTime cannot be removed once set on an object. It can be updated to a + // later value but not to an earlier one. For more information see + // https://cloud.google.com/storage/docs/metadata#custom-time . + CustomTime time.Time + + // ComponentCount is the number of objects contained within a composite object. + // For non-composite objects, the value will be zero. + // This field is read-only. + ComponentCount int64 } // convertTime converts a time in RFC3339 format to time.Time. 
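With the rewritten Update above, the call is treated as idempotent (and thus retried under the default policy) only when a MetagenerationMatch precondition is supplied, so a common pattern is to read the current attributes first and pass the metageneration back. A sketch under that assumption, with hypothetical bucket and object names; it also exercises the new CustomTime field and the documented "zero value deletes" semantics:

    package main

    import (
        "context"
        "log"
        "time"

        "cloud.google.com/go/storage"
    )

    func main() {
        ctx := context.Background()
        client, err := storage.NewClient(ctx)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        obj := client.Bucket("my-bucket").Object("my-object")

        // Fetch current attrs so the update carries a metageneration precondition,
        // which makes it idempotent and therefore retryable under RetryIdempotent.
        attrs, err := obj.Attrs(ctx)
        if err != nil {
            log.Fatal(err)
        }
        obj = obj.If(storage.Conditions{MetagenerationMatch: attrs.Metageneration})

        updated, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{
            ContentType:     "text/html",
            ContentEncoding: "",                  // zero value deletes the attribute
            Metadata:        map[string]string{}, // empty map deletes all metadata
            CustomTime:      time.Now(),          // can only move forward once set
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Println(updated.Updated)
    }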
@@ -1211,6 +1332,22 @@ func convertTime(t string) time.Time { return r } +func convertProtoTime(t *timestamppb.Timestamp) time.Time { + var r time.Time + if t != nil { + r = t.AsTime() + } + return r +} + +func toProtoTimestamp(t time.Time) *timestamppb.Timestamp { + if t.IsZero() { + return nil + } + + return timestamppb.New(t) +} + func newObject(o *raw.Object) *ObjectAttrs { if o == nil { return nil @@ -1252,6 +1389,43 @@ func newObject(o *raw.Object) *ObjectAttrs { Deleted: convertTime(o.TimeDeleted), Updated: convertTime(o.Updated), Etag: o.Etag, + CustomTime: convertTime(o.CustomTime), + ComponentCount: o.ComponentCount, + } +} + +func newObjectFromProto(o *storagepb.Object) *ObjectAttrs { + if o == nil { + return nil + } + return &ObjectAttrs{ + Bucket: parseBucketName(o.Bucket), + Name: o.Name, + ContentType: o.ContentType, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + EventBasedHold: o.GetEventBasedHold(), + TemporaryHold: o.TemporaryHold, + RetentionExpirationTime: convertProtoTime(o.GetRetentionExpireTime()), + ACL: toObjectACLRulesFromProto(o.GetAcl()), + Owner: o.GetOwner().GetEntity(), + ContentEncoding: o.ContentEncoding, + ContentDisposition: o.ContentDisposition, + Size: int64(o.Size), + MD5: o.GetChecksums().GetMd5Hash(), + CRC32C: o.GetChecksums().GetCrc32C(), + Metadata: o.Metadata, + Generation: o.Generation, + Metageneration: o.Metageneration, + StorageClass: o.StorageClass, + // CustomerKeySHA256 needs to be presented as base64 encoded, but the response from gRPC is not. + CustomerKeySHA256: base64.StdEncoding.EncodeToString(o.GetCustomerEncryption().GetKeySha256Bytes()), + KMSKeyName: o.GetKmsKey(), + Created: convertProtoTime(o.GetCreateTime()), + Deleted: convertProtoTime(o.GetDeleteTime()), + Updated: convertProtoTime(o.GetUpdateTime()), + CustomTime: convertProtoTime(o.GetCustomTime()), + ComponentCount: int64(o.ComponentCount), } } @@ -1273,6 +1447,31 @@ func encodeUint32(u uint32) string { return base64.StdEncoding.EncodeToString(b) } +// Projection is enumerated type for Query.Projection. +type Projection int + +const ( + // ProjectionDefault returns all fields of objects. + ProjectionDefault Projection = iota + + // ProjectionFull returns all fields of objects. + ProjectionFull + + // ProjectionNoACL returns all fields of objects except for Owner and ACL. + ProjectionNoACL +) + +func (p Projection) String() string { + switch p { + case ProjectionFull: + return "full" + case ProjectionNoACL: + return "noAcl" + default: + return "" + } +} + // Query represents a query to filter objects from a bucket. type Query struct { // Delimiter returns results in a directory-like fashion. @@ -1293,10 +1492,35 @@ type Query struct { // object will be included in the results. Versions bool - // fieldSelection is used to select only specific fields to be returned by - // the query. It's used internally and is populated for the user by - // calling Query.SetAttrSelection - fieldSelection string + // attrSelection is used to select only specific fields to be returned by + // the query. It is set by the user calling calling SetAttrSelection. These + // are used by toFieldMask and toFieldSelection for gRPC and HTTP/JSON + // clients repsectively. + attrSelection []string + + // StartOffset is used to filter results to objects whose names are + // lexicographically equal to or after startOffset. If endOffset is also set, + // the objects listed will have names between startOffset (inclusive) and + // endOffset (exclusive). 
+ StartOffset string + + // EndOffset is used to filter results to objects whose names are + // lexicographically before endOffset. If startOffset is also set, the objects + // listed will have names between startOffset (inclusive) and endOffset (exclusive). + EndOffset string + + // Projection defines the set of properties to return. It will default to ProjectionFull, + // which returns all properties. Passing ProjectionNoACL will omit Owner and ACL, + // which may improve performance when listing many objects. + Projection Projection + + // IncludeTrailingDelimiter controls how objects which end in a single + // instance of Delimiter (for example, if Query.Delimiter = "/" and the + // object name is "foo/bar/") are included in the results. By default, these + // objects only show up as prefixes. If IncludeTrailingDelimiter is set to + // true, they will also be included as objects and their metadata will be + // populated in the returned ObjectAttrs. + IncludeTrailingDelimiter bool } // attrToFieldMap maps the field names of ObjectAttrs to the underlying field @@ -1329,6 +1553,42 @@ var attrToFieldMap = map[string]string{ "Deleted": "timeDeleted", "Updated": "updated", "Etag": "etag", + "CustomTime": "customTime", + "ComponentCount": "componentCount", +} + +// attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field +// names in the protobuf Object message. +var attrToProtoFieldMap = map[string]string{ + "Name": "name", + "Bucket": "bucket", + "Etag": "etag", + "Generation": "generation", + "Metageneration": "metageneration", + "StorageClass": "storage_class", + "Size": "size", + "ContentEncoding": "content_encoding", + "ContentDisposition": "content_disposition", + "CacheControl": "cache_control", + "ACL": "acl", + "ContentLanguage": "content_language", + "Deleted": "delete_time", + "ContentType": "content_type", + "Created": "create_time", + "CRC32C": "checksums.crc32c", + "MD5": "checksums.md5_hash", + "Updated": "update_time", + "KMSKeyName": "kms_key", + "TemporaryHold": "temporary_hold", + "RetentionExpirationTime": "retention_expire_time", + "Metadata": "metadata", + "EventBasedHold": "event_based_hold", + "Owner": "owner", + "CustomerKeySHA256": "customer_encryption", + "CustomTime": "custom_time", + "ComponentCount": "component_count", + // MediaLink was explicitly excluded from the proto as it is an HTTP-ism. + // "MediaLink": "mediaLink", } // SetAttrSelection makes the query populate only specific attributes of @@ -1339,19 +1599,45 @@ var attrToFieldMap = map[string]string{ // optimization; for more information, see // https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance func (q *Query) SetAttrSelection(attrs []string) error { + // Validate selections. + for _, attr := range attrs { + // If the attr is acceptable for one of the two sets, then it is OK. + // If it is not acceptable for either, then return an error. + // The respective masking implementations ignore unknown attrs which + // makes switching between transports a little easier. 
+ _, okJSON := attrToFieldMap[attr] + _, okGRPC := attrToProtoFieldMap[attr] + + if !okJSON && !okGRPC { + return fmt.Errorf("storage: attr %v is not valid", attr) + } + } + + q.attrSelection = attrs + + return nil +} + +func (q *Query) toFieldSelection() string { + if q == nil || len(q.attrSelection) == 0 { + return "" + } fieldSet := make(map[string]bool) - for _, attr := range attrs { + for _, attr := range q.attrSelection { field, ok := attrToFieldMap[attr] if !ok { - return fmt.Errorf("storage: attr %v is not valid", attr) + // Future proofing, skip unknown fields, let SetAttrSelection handle + // error modes. + continue } fieldSet[field] = true } + var s string if len(fieldSet) > 0 { var b bytes.Buffer - b.WriteString("items(") + b.WriteString("prefixes,items(") first := true for field := range fieldSet { if !first { @@ -1361,9 +1647,50 @@ func (q *Query) SetAttrSelection(attrs []string) error { b.WriteString(field) } b.WriteString(")") - q.fieldSelection = b.String() + s = b.String() } - return nil + return s +} + +func (q *Query) toFieldMask() *fieldmaskpb.FieldMask { + // The default behavior with no Query is ProjectionDefault (i.e. ProjectionFull). + if q == nil { + return &fieldmaskpb.FieldMask{Paths: []string{"*"}} + } + + // User selected attributes via q.SetAttrSeleciton. This takes precedence + // over the Projection. + if numSelected := len(q.attrSelection); numSelected > 0 { + protoFieldPaths := make([]string, 0, numSelected) + + for _, attr := range q.attrSelection { + pf, ok := attrToProtoFieldMap[attr] + if !ok { + // Future proofing, skip unknown fields, let SetAttrSelection + // handle error modes. + continue + } + protoFieldPaths = append(protoFieldPaths, pf) + } + + return &fieldmaskpb.FieldMask{Paths: protoFieldPaths} + } + + // ProjectDefault == ProjectionFull which means all fields. + fm := &fieldmaskpb.FieldMask{Paths: []string{"*"}} + if q.Projection == ProjectionNoACL { + paths := make([]string, 0, len(attrToProtoFieldMap)-2) // omitting two fields + for _, f := range attrToProtoFieldMap { + // Skip the acl and owner fields for "NoACL". + if f == "acl" || f == "owner" { + continue + } + paths = append(paths, f) + } + fm.Paths = paths + } + + return fm } // Conditions constrain methods to act on specific generations of @@ -1507,6 +1834,33 @@ func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall return nil } +func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.RewriteObjectRequest) error { + if gen >= 0 { + call.SourceGeneration = gen + } + if conds == nil { + return nil + } + if err := conds.validate("CopyTo source"); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + call.IfSourceGenerationMatch = proto.Int64(conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + call.IfSourceGenerationNotMatch = proto.Int64(conds.GenerationNotMatch) + case conds.DoesNotExist: + call.IfSourceGenerationMatch = proto.Int64(0) + } + switch { + case conds.MetagenerationMatch != 0: + call.IfSourceMetagenerationMatch = proto.Int64(conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + call.IfSourceMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch) + } + return nil +} + // setConditionField sets a field on a *raw.WhateverCall. // We can't use anonymous interfaces because the return type is // different, since the field setters are builders. 
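The Query additions above (StartOffset/EndOffset, the Projection enum, and the reworked SetAttrSelection that feeds both the JSON field selector and the gRPC field mask) come together when listing objects. A rough sketch with a hypothetical bucket and prefix; BucketHandle.Objects and the iterator package are part of the existing public API rather than anything introduced by this diff:

    package main

    import (
        "context"
        "fmt"
        "log"

        "cloud.google.com/go/storage"
        "google.golang.org/api/iterator"
    )

    func main() {
        ctx := context.Background()
        client, err := storage.NewClient(ctx)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        q := &storage.Query{
            Prefix:      "logs/2023/",
            StartOffset: "logs/2023/01",          // inclusive lower bound on names
            EndOffset:   "logs/2023/02",          // exclusive upper bound on names
            Projection:  storage.ProjectionNoACL, // omit Owner and ACL for faster listings
        }
        // Only request the fields read below; unknown attrs return an error here.
        if err := q.SetAttrSelection([]string{"Name", "Size"}); err != nil {
            log.Fatal(err)
        }

        it := client.Bucket("my-bucket").Objects(ctx, q)
        for {
            attrs, err := it.Next()
            if err == iterator.Done {
                break
            }
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(attrs.Name, attrs.Size)
        }
    }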
@@ -1519,42 +1873,168 @@ func setConditionField(call reflect.Value, name string, value interface{}) bool return true } -// conditionsQuery returns the generation and conditions as a URL query -// string suitable for URL.RawQuery. It assumes that the conditions -// have been validated. -func conditionsQuery(gen int64, conds *Conditions) string { - // URL escapes are elided because integer strings are URL-safe. - var buf []byte +// Retryer returns an object handle that is configured with custom retry +// behavior as specified by the options that are passed to it. All operations +// on the new handle will use the customized retry configuration. +// These retry options will merge with the bucket's retryer (if set) for the +// returned handle. Options passed into this method will take precedence over +// retry options on the bucket and client. Note that you must explicitly pass in +// each option you want to override. +func (o *ObjectHandle) Retryer(opts ...RetryOption) *ObjectHandle { + o2 := *o + var retry *retryConfig + if o.retry != nil { + // merge the options with the existing retry + retry = o.retry + } else { + retry = &retryConfig{} + } + for _, opt := range opts { + opt.apply(retry) + } + o2.retry = retry + o2.acl.retry = retry + return &o2 +} - appendParam := func(s string, n int64) { - if len(buf) > 0 { - buf = append(buf, '&') - } - buf = append(buf, s...) - buf = strconv.AppendInt(buf, n, 10) +// SetRetry configures the client with custom retry behavior as specified by the +// options that are passed to it. All operations using this client will use the +// customized retry configuration. +// This should be called once before using the client for network operations, as +// there could be indeterminate behaviour with operations in progress. +// Retry options set on a bucket or object handle will take precedence over +// these options. +func (c *Client) SetRetry(opts ...RetryOption) { + var retry *retryConfig + if c.retry != nil { + // merge the options with the existing retry + retry = c.retry + } else { + retry = &retryConfig{} + } + for _, opt := range opts { + opt.apply(retry) } + c.retry = retry +} - if gen >= 0 { - appendParam("generation=", gen) +// RetryOption allows users to configure non-default retry behavior for API +// calls made to GCS. +type RetryOption interface { + apply(config *retryConfig) +} + +// WithBackoff allows configuration of the backoff timing used for retries. +// Available configuration options (Initial, Max and Multiplier) are described +// at https://pkg.go.dev/github.com/googleapis/gax-go/v2#Backoff. If any fields +// are not supplied by the user, gax default values will be used. +func WithBackoff(backoff gax.Backoff) RetryOption { + return &withBackoff{ + backoff: backoff, } - if conds == nil { - return string(buf) +} + +type withBackoff struct { + backoff gax.Backoff +} + +func (wb *withBackoff) apply(config *retryConfig) { + config.backoff = &wb.backoff +} + +// RetryPolicy describes the available policies for which operations should be +// retried. The default is `RetryIdempotent`. +type RetryPolicy int + +const ( + // RetryIdempotent causes only idempotent operations to be retried when the + // service returns a transient error. Using this policy, fully idempotent + // operations (such as `ObjectHandle.Attrs()`) will always be retried. 
+ // Conditionally idempotent operations (for example `ObjectHandle.Update()`) + // will be retried only if the necessary conditions have been supplied (in + // the case of `ObjectHandle.Update()` this would mean supplying a + // `Conditions.MetagenerationMatch` condition is required). + RetryIdempotent RetryPolicy = iota + + // RetryAlways causes all operations to be retried when the service returns a + // transient error, regardless of idempotency considerations. + RetryAlways + + // RetryNever causes the client to not perform retries on failed operations. + RetryNever +) + +// WithPolicy allows the configuration of which operations should be performed +// with retries for transient errors. +func WithPolicy(policy RetryPolicy) RetryOption { + return &withPolicy{ + policy: policy, } - switch { - case conds.GenerationMatch != 0: - appendParam("ifGenerationMatch=", conds.GenerationMatch) - case conds.GenerationNotMatch != 0: - appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch) - case conds.DoesNotExist: - appendParam("ifGenerationMatch=", 0) +} + +type withPolicy struct { + policy RetryPolicy +} + +func (ws *withPolicy) apply(config *retryConfig) { + config.policy = ws.policy +} + +// WithErrorFunc allows users to pass a custom function to the retryer. Errors +// will be retried if and only if `shouldRetry(err)` returns true. +// By default, the following errors are retried (see ShouldRetry for the default +// function): +// +// - HTTP responses with codes 408, 429, 502, 503, and 504. +// +// - Transient network errors such as connection reset and io.ErrUnexpectedEOF. +// +// - Errors which are considered transient using the Temporary() interface. +// +// - Wrapped versions of these errors. +// +// This option can be used to retry on a different set of errors than the +// default. Users can use the default ShouldRetry function inside their custom +// function if they only want to make minor modifications to default behavior. 
+func WithErrorFunc(shouldRetry func(err error) bool) RetryOption { + return &withErrorFunc{ + shouldRetry: shouldRetry, } - switch { - case conds.MetagenerationMatch != 0: - appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch) - case conds.MetagenerationNotMatch != 0: - appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch) +} + +type withErrorFunc struct { + shouldRetry func(err error) bool +} + +func (wef *withErrorFunc) apply(config *retryConfig) { + config.shouldRetry = wef.shouldRetry +} + +type retryConfig struct { + backoff *gax.Backoff + policy RetryPolicy + shouldRetry func(err error) bool +} + +func (r *retryConfig) clone() *retryConfig { + if r == nil { + return nil + } + + var bo *gax.Backoff + if r.backoff != nil { + bo = &gax.Backoff{ + Initial: r.backoff.Initial, + Max: r.backoff.Max, + Multiplier: r.backoff.Multiplier, + } + } + + return &retryConfig{ + backoff: bo, + policy: r.policy, + shouldRetry: r.shouldRetry, } - return string(buf) } // composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods @@ -1588,19 +2068,146 @@ func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) erro if copySource { cs = "copy-source-" } - headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256") + headers.Set("x-goog-"+cs+"encryption-algorithm", aes256Algorithm) headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key)) keyHash := sha256.Sum256(key) headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:])) return nil } +// toProtoCommonObjectRequestParams sets customer-supplied encryption to the proto library's CommonObjectRequestParams. +func toProtoCommonObjectRequestParams(key []byte) *storagepb.CommonObjectRequestParams { + if key == nil { + return nil + } + keyHash := sha256.Sum256(key) + return &storagepb.CommonObjectRequestParams{ + EncryptionAlgorithm: aes256Algorithm, + EncryptionKeyBytes: key, + EncryptionKeySha256Bytes: keyHash[:], + } +} + +func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChecksums { + var checksums *storagepb.ObjectChecksums + if sendCRC32C { + checksums = &storagepb.ObjectChecksums{ + Crc32C: proto.Uint32(attrs.CRC32C), + } + } + if len(attrs.MD5) != 0 { + if checksums == nil { + checksums = &storagepb.ObjectChecksums{ + Md5Hash: attrs.MD5, + } + } else { + checksums.Md5Hash = attrs.MD5 + } + } + return checksums +} + // ServiceAccount fetches the email address of the given project's Google Cloud Storage service account. func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) { - r := c.raw.Projects.ServiceAccount.Get(projectID) - res, err := r.Context(ctx).Do() - if err != nil { - return "", err + o := makeStorageOpts(true, c.retry, "") + return c.tc.GetServiceAccount(ctx, projectID, o...) + +} + +// bucketResourceName formats the given project ID and bucketResourceName ID +// into a Bucket resource name. This is the format necessary for the gRPC API as +// it conforms to the Resource-oriented design practices in https://google.aip.dev/121. +func bucketResourceName(p, b string) string { + return fmt.Sprintf("projects/%s/buckets/%s", p, b) +} + +// parseBucketName strips the leading resource path segment and returns the +// bucket ID, which is the simple Bucket name typical of the v1 API. 
+func parseBucketName(b string) string { + sep := strings.LastIndex(b, "/") + return b[sep+1:] +} + +// parseProjectNumber consume the given resource name and parses out the project +// number if one is present i.e. it is not a project ID. +func parseProjectNumber(r string) uint64 { + projectID := regexp.MustCompile(`projects\/([0-9]+)\/?`) + if matches := projectID.FindStringSubmatch(r); len(matches) > 0 { + // Capture group follows the matched segment. For example: + // input: projects/123/bars/456 + // output: [projects/123/, 123] + number, err := strconv.ParseUint(matches[1], 10, 64) + if err != nil { + return 0 + } + return number } - return res.EmailAddress, nil + + return 0 +} + +// toProjectResource accepts a project ID and formats it as a Project resource +// name. +func toProjectResource(project string) string { + return fmt.Sprintf("projects/%s", project) +} + +// setConditionProtoField uses protobuf reflection to set named condition field +// to the given condition value if supported on the protobuf message. +// +// This is an experimental API and not intended for public use. +func setConditionProtoField(m protoreflect.Message, f string, v int64) bool { + fields := m.Descriptor().Fields() + if rf := fields.ByName(protoreflect.Name(f)); rf != nil { + m.Set(rf, protoreflect.ValueOfInt64(v)) + return true + } + + return false +} + +// applyCondsProto validates and attempts to set the conditions on a protobuf +// message using protobuf reflection. +// +// This is an experimental API and not intended for public use. +func applyCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error { + rmsg := msg.ProtoReflect() + + if gen >= 0 { + if !setConditionProtoField(rmsg, "generation", gen) { + return fmt.Errorf("storage: %s: generation not supported", method) + } + } + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + + switch { + case conds.GenerationMatch != 0: + if !setConditionProtoField(rmsg, "if_generation_match", conds.GenerationMatch) { + return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) + } + case conds.GenerationNotMatch != 0: + if !setConditionProtoField(rmsg, "if_generation_not_match", conds.GenerationNotMatch) { + return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) + } + case conds.DoesNotExist: + if !setConditionProtoField(rmsg, "if_generation_match", int64(0)) { + return fmt.Errorf("storage: %s: DoesNotExist not supported", method) + } + } + switch { + case conds.MetagenerationMatch != 0: + if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil } diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index 1843a8141559a..3a6a1ce0f56c4 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -16,15 +16,12 @@ package storage import ( "context" - "encoding/base64" "errors" "fmt" "io" "sync" + "time" "unicode/utf8" - - "google.golang.org/api/googleapi" - raw "google.golang.org/api/storage/v1" ) // A Writer writes a Cloud Storage object. 
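The retry plumbing introduced in the storage.go hunks above (Client.SetRetry, ObjectHandle.Retryer, WithBackoff, WithPolicy, WithErrorFunc) composes per client and per handle, with handle options taking precedence. A hedged sketch with hypothetical names; gax is github.com/googleapis/gax-go/v2, which this go.mod already pulls in, and ShouldRetry is the default predicate the new doc comments reference:

    package main

    import (
        "context"
        "log"
        "time"

        "cloud.google.com/go/storage"
        "github.com/googleapis/gax-go/v2"
    )

    func main() {
        ctx := context.Background()
        client, err := storage.NewClient(ctx)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        // Client-wide defaults: slower backoff, still idempotent-only retries.
        client.SetRetry(
            storage.WithBackoff(gax.Backoff{
                Initial:    2 * time.Second,
                Max:        30 * time.Second,
                Multiplier: 2,
            }),
            storage.WithPolicy(storage.RetryIdempotent),
        )

        // Per-object override: retry all operations on this handle, using the
        // library's default transient-error predicate.
        obj := client.Bucket("my-bucket").Object("my-object").Retryer(
            storage.WithPolicy(storage.RetryAlways),
            storage.WithErrorFunc(storage.ShouldRetry),
        )

        if _, err := obj.Attrs(ctx); err != nil {
            log.Fatal(err)
        }
    }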
@@ -34,35 +31,61 @@ type Writer struct { // attributes are ignored. ObjectAttrs - // SendCRC specifies whether to transmit a CRC32C field. It should be set + // SendCRC32C specifies whether to transmit a CRC32C field. It should be set // to true in addition to setting the Writer's CRC32C field, because zero // is a valid CRC and normally a zero would not be transmitted. // If a CRC32C is sent, and the data written does not match the checksum, // the write will be rejected. + // + // Note: SendCRC32C must be set to true BEFORE the first call to + // Writer.Write() in order to send the checksum. If it is set after that + // point, the checksum will be ignored. SendCRC32C bool // ChunkSize controls the maximum number of bytes of the object that the // Writer will attempt to send to the server in a single request. Objects // smaller than the size will be sent in a single request, while larger - // objects will be split over multiple requests. The size will be rounded up - // to the nearest multiple of 256K. + // objects will be split over multiple requests. The value will be rounded up + // to the nearest multiple of 256K. The default ChunkSize is 16MiB. + // + // Each Writer will internally allocate a buffer of size ChunkSize. This is + // used to buffer input data and allow for the input to be sent again if a + // request must be retried. // - // ChunkSize will default to a reasonable value. If you perform many - // concurrent writes of small objects (under ~8MB), you may wish set ChunkSize - // to a value that matches your objects' sizes to avoid consuming large - // amounts of memory. See + // If you upload small objects (< 16MiB), you should set ChunkSize + // to a value slightly larger than the objects' sizes to avoid memory bloat. + // This is especially important if you are uploading many small objects + // concurrently. See // https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#size // for more information about performance trade-offs related to ChunkSize. // // If ChunkSize is set to zero, chunking will be disabled and the object will // be uploaded in a single request without the use of a buffer. This will // further reduce memory used during uploads, but will also prevent the writer - // from retrying in case of a transient error from the server, since a buffer - // is required in order to retry the failed request. + // from retrying in case of a transient error from the server or resuming an + // upload that fails midway through, since the buffer is required in order to + // retry the failed request. // // ChunkSize must be set before the first Write call. ChunkSize int + // ChunkRetryDeadline sets a per-chunk retry deadline for multi-chunk + // resumable uploads. + // + // For uploads of larger files, the Writer will attempt to retry if the + // request to upload a particular chunk fails with a transient error. + // If a single chunk has been attempting to upload for longer than this + // deadline and the request fails, it will no longer be retried, and the error + // will be returned to the caller. This is only applicable for files which are + // large enough to require a multi-chunk resumable upload. The default value + // is 32s. Users may want to pick a longer deadline if they are using larger + // values for ChunkSize or if they expect to have a slow or unreliable + // internet connection. + // + // To set a deadline on the entire upload, use context timeout or + // cancellation. 
+ ChunkRetryDeadline time.Duration + // ProgressFunc can be used to monitor the progress of a large write. // operation. If ProgressFunc is not nil and writing requires multiple // calls to the underlying service (see @@ -86,96 +109,6 @@ type Writer struct { err error } -func (w *Writer) open() error { - attrs := w.ObjectAttrs - // Check the developer didn't change the object Name (this is unfortunate, but - // we don't want to store an object under the wrong name). - if attrs.Name != w.o.object { - return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) - } - if !utf8.ValidString(attrs.Name) { - return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) - } - if attrs.KMSKeyName != "" && w.o.encryptionKey != nil { - return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key") - } - pr, pw := io.Pipe() - w.pw = pw - w.opened = true - - go w.monitorCancel() - - if w.ChunkSize < 0 { - return errors.New("storage: Writer.ChunkSize must be non-negative") - } - mediaOpts := []googleapi.MediaOption{ - googleapi.ChunkSize(w.ChunkSize), - } - if c := attrs.ContentType; c != "" { - mediaOpts = append(mediaOpts, googleapi.ContentType(c)) - } - - go func() { - defer close(w.donec) - - rawObj := attrs.toRawObject(w.o.bucket) - if w.SendCRC32C { - rawObj.Crc32c = encodeUint32(attrs.CRC32C) - } - if w.MD5 != nil { - rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5) - } - if w.o.c.envHost != "" { - w.o.c.raw.BasePath = fmt.Sprintf("%s://%s", w.o.c.scheme, w.o.c.envHost) - } - call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj). - Media(pr, mediaOpts...). - Projection("full"). - Context(w.ctx). - Name(w.o.object) - - if w.ProgressFunc != nil { - call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) }) - } - if attrs.KMSKeyName != "" { - call.KmsKeyName(attrs.KMSKeyName) - } - if attrs.PredefinedACL != "" { - call.PredefinedAcl(attrs.PredefinedACL) - } - if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil { - w.mu.Lock() - w.err = err - w.mu.Unlock() - pr.CloseWithError(err) - return - } - var resp *raw.Object - err := applyConds("NewWriter", w.o.gen, w.o.conds, call) - if err == nil { - if w.o.userProject != "" { - call.UserProject(w.o.userProject) - } - setClientHeader(call.Header()) - - // The internals that perform call.Do automatically retry both the initial - // call to set up the upload as well as calls to upload individual chunks - // for a resumable upload (as long as the chunk size is non-zero). Hence - // there is no need to add retries here. - resp, err = call.Do() - } - if err != nil { - w.mu.Lock() - w.err = err - w.mu.Unlock() - pr.CloseWithError(err) - return - } - w.obj = newObject(resp) - }() - return nil -} - // Write appends to w. It implements the io.Writer interface. // // Since writes happen asynchronously, Write may return a nil @@ -193,7 +126,7 @@ func (w *Writer) Write(p []byte) (n int, err error) { return 0, werr } if !w.opened { - if err := w.open(); err != nil { + if err := w.openWriter(); err != nil { return 0, err } } @@ -205,7 +138,7 @@ func (w *Writer) Write(p []byte) (n int, err error) { // Preserve existing functionality that when context is canceled, Write will return // context.Canceled instead of "io: read/write on closed pipe". This hides the // pipe implementation detail from users and makes Write seem as though it's an RPC. 
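Putting the Writer knobs documented above together: a minimal upload sketch that sets a custom chunk size, the new per-chunk retry deadline, and a pre-computed CRC32C (SendCRC32C must be true before the first Write). The bucket, object name, and payload are hypothetical:

    package main

    import (
        "context"
        "hash/crc32"
        "log"
        "time"

        "cloud.google.com/go/storage"
    )

    func main() {
        ctx := context.Background()
        client, err := storage.NewClient(ctx)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        data := []byte("hello, world")

        w := client.Bucket("my-bucket").Object("greeting.txt").NewWriter(ctx)
        w.ChunkSize = 256 * 1024                // rounded up to a 256 KiB multiple; also sizes the retry buffer
        w.ChunkRetryDeadline = 60 * time.Second // per-chunk deadline for resumable uploads
        w.ContentType = "text/plain"
        w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
        w.SendCRC32C = true // must be set before the first Write call

        if _, err := w.Write(data); err != nil {
            log.Fatal(err)
        }
        if err := w.Close(); err != nil {
            log.Fatal(err)
        }
        log.Printf("wrote %s (gen %d)", w.Attrs().Name, w.Attrs().Generation)
    }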
- if werr == context.Canceled || werr == context.DeadlineExceeded { + if errors.Is(werr, context.Canceled) || errors.Is(werr, context.DeadlineExceeded) { return n, werr } } @@ -217,7 +150,7 @@ func (w *Writer) Write(p []byte) (n int, err error) { // can be retrieved by calling Attrs. func (w *Writer) Close() error { if !w.opened { - if err := w.open(); err != nil { + if err := w.openWriter(); err != nil { return err } } @@ -233,6 +166,43 @@ func (w *Writer) Close() error { return w.err } +func (w *Writer) openWriter() (err error) { + if err := w.validateWriteAttrs(); err != nil { + return err + } + if w.o.gen != defaultGen { + return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen) + } + + isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true) + opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject) + params := &openWriterParams{ + ctx: w.ctx, + chunkSize: w.ChunkSize, + chunkRetryDeadline: w.ChunkRetryDeadline, + bucket: w.o.bucket, + attrs: &w.ObjectAttrs, + conds: w.o.conds, + encryptionKey: w.o.encryptionKey, + sendCRC32C: w.SendCRC32C, + donec: w.donec, + setError: w.error, + progress: w.progress, + setObj: func(o *ObjectAttrs) { w.obj = o }, + } + if err := w.ctx.Err(); err != nil { + return err // short-circuit + } + w.pw, err = w.o.c.tc.OpenWriter(params, opts...) + if err != nil { + return err + } + w.opened = true + go w.monitorCancel() + + return nil +} + // monitorCancel is intended to be used as a background goroutine. It monitors the // context, and when it observes that the context has been canceled, it manually // closes things that do not take a context. @@ -266,3 +236,38 @@ func (w *Writer) CloseWithError(err error) error { func (w *Writer) Attrs() *ObjectAttrs { return w.obj } + +func (w *Writer) validateWriteAttrs() error { + attrs := w.ObjectAttrs + // Check the developer didn't change the object Name (this is unfortunate, but + // we don't want to store an object under the wrong name). + if attrs.Name != w.o.object { + return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) + } + if !utf8.ValidString(attrs.Name) { + return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) + } + if attrs.KMSKeyName != "" && w.o.encryptionKey != nil { + return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key") + } + if w.ChunkSize < 0 { + return errors.New("storage: Writer.ChunkSize must be non-negative") + } + return nil +} + +// progress is a convenience wrapper that reports write progress to the Writer +// ProgressFunc if it is set and progress is non-zero. +func (w *Writer) progress(p int64) { + if w.ProgressFunc != nil && p != 0 { + w.ProgressFunc(p) + } +} + +// error acquires the Writer's lock, sets the Writer's err to the given error, +// then relinquishes the lock. +func (w *Writer) error(err error) { + w.mu.Lock() + w.err = err + w.mu.Unlock() +} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index fd2b3a42b2a2e..087320da7f0f5 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -13,21 +13,21 @@ // // The primary features of cmp are: // -// • When the default behavior of equality does not suit the needs of the test, -// custom equality functions can override the equality operation. 
-// For example, an equality function may report floats as equal so long as they -// are within some tolerance of each other. +// - When the default behavior of equality does not suit the test's needs, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as +// they are within some tolerance of each other. // -// • Types that have an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation for the types -// that they define. +// - Types with an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation +// for the types that they define. // -// • If no custom equality functions are used and no Equal method is defined, -// equality is determined by recursively comparing the primitive kinds on both -// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported -// fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly -// compared using the Exporter option. +// - If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on +// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, +// unexported fields are not compared by default; they result in panics +// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) +// or explicitly compared using the Exporter option. package cmp import ( @@ -45,25 +45,25 @@ import ( // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // -// • Let S be the set of all Ignore, Transformer, and Comparer options that -// remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is greater than one, -// then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform the current -// values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. -// Otherwise, evaluation proceeds to the next rule. +// - Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is non-zero, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform +// the current values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. // -// • If the values have an Equal method of the form "(T) Equal(T) bool" or -// "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and -// evaluation proceeds to the next rule. 
+// - If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. // -// • Lastly, try to compare x and y based on their basic kinds. -// Simple kinds like booleans, integers, floats, complex numbers, strings, and -// channels are compared using the equivalent of the == operator in Go. -// Functions are only equal if they are both nil, otherwise they are unequal. +// - Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, +// and channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. // // Structs are equal if recursively calling Equal on all fields report equal. // If a struct contains unexported fields, Equal panics unless an Ignore option @@ -144,7 +144,7 @@ func rootStep(x, y interface{}) PathStep { // so that they have the same parent type. var t reflect.Type if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { - t = reflect.TypeOf((*interface{})(nil)).Elem() + t = anyType if vx.IsValid() { vvx := reflect.New(t).Elem() vvx.Set(vx) @@ -639,7 +639,9 @@ type dynChecker struct{ curr, next int } // Next increments the state and reports whether a check should be performed. // // Checks occur every Nth function call, where N is a triangular number: +// // 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// // See https://en.wikipedia.org/wiki/Triangular_number // // This sequence ensures that the cost of checks drops significantly as diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index bc196b16cfaad..a248e5436d98e 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -127,9 +127,9 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 // This function returns an edit-script, which is a sequence of operations // needed to convert one list into the other. The following invariants for // the edit-script are maintained: -// • eq == (es.Dist()==0) -// • nx == es.LenX() -// • ny == es.LenY() +// - eq == (es.Dist()==0) +// - nx == es.LenX() +// - ny == es.LenY() // // This algorithm is not guaranteed to be an optimal solution (i.e., one that // produces an edit-script with a minimal Levenshtein distance). This algorithm @@ -169,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A diagonal edge is equivalent to a matching symbol between both X and Y. // Invariants: - // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx - // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny // // In general: - // • fwdFrontier.X < revFrontier.X - // • fwdFrontier.Y < revFrontier.Y + // - fwdFrontier.X < revFrontier.X + // - fwdFrontier.Y < revFrontier.Y + // // Unless, it is time for the algorithm to terminate. 
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} revPath := path{-1, point{nx, ny}, make(EditScript, 0)} @@ -195,19 +196,21 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // computing sub-optimal edit-scripts between two lists. // // The algorithm is approximately as follows: - // • Searching for differences switches back-and-forth between - // a search that starts at the beginning (the top-left corner), and - // a search that starts at the end (the bottom-right corner). The goal of - // the search is connect with the search from the opposite corner. - // • As we search, we build a path in a greedy manner, where the first - // match seen is added to the path (this is sub-optimal, but provides a - // decent result in practice). When matches are found, we try the next pair - // of symbols in the lists and follow all matches as far as possible. - // • When searching for matches, we search along a diagonal going through - // through the "frontier" point. If no matches are found, we advance the - // frontier towards the opposite corner. - // • This algorithm terminates when either the X coordinates or the - // Y coordinates of the forward and reverse frontier points ever intersect. + // - Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). + // The goal of the search is connect with the search + // from the opposite corner. + // - As we search, we build a path in a greedy manner, + // where the first match seen is added to the path (this is sub-optimal, + // but provides a decent result in practice). When matches are found, + // we try the next pair of symbols in the lists and follow all matches + // as far as possible. + // - When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, + // we advance the frontier towards the opposite corner. + // - This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. // This algorithm is correct even if searching only in the forward direction // or in the reverse direction. We do both because it is commonly observed @@ -389,6 +392,7 @@ type point struct{ X, Y int } func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } // zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// // [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] func zigzag(x int) int { if x&1 != 0 { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go deleted file mode 100644 index 9147a29973110..0000000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package value - -import ( - "math" - "reflect" -) - -// IsZero reports whether v is the zero value. -// This does not rely on Interface and so can be used on unexported fields. 
-func IsZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !IsZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !IsZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index e57b9eb5392d2..1f9ca9c4892b5 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -33,6 +33,7 @@ type Option interface { } // applicableOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Grouping: Options type applicableOption interface { @@ -43,6 +44,7 @@ type applicableOption interface { } // coreOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Filters: *pathFilter | *valuesFilter type coreOption interface { @@ -336,9 +338,9 @@ func (tr transformer) String() string { // both implement T. // // The equality function must be: -// • Symmetric: equal(x, y) == equal(y, x) -// • Deterministic: equal(x, y) == equal(x, y) -// • Pure: equal(x, y) does not modify x or y +// - Symmetric: equal(x, y) == equal(y, x) +// - Deterministic: equal(x, y) == equal(x, y) +// - Pure: equal(x, y) does not modify x or y func Comparer(f interface{}) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.Equal) || v.IsNil() { @@ -430,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option { } // Result represents the comparison result for a single node and -// is provided by cmp when calling Result (see Reporter). +// is provided by cmp when calling Report (see Reporter). type Result struct { _ [0]func() // Make Result incomparable flags resultFlags diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index c7100346323b4..a0a588502ed67 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -41,13 +41,13 @@ type PathStep interface { // The type of each valid value is guaranteed to be identical to Type. // // In some cases, one or both may be invalid or have restrictions: - // • For StructField, both are not interface-able if the current field - // is unexported and the struct type is not explicitly permitted by - // an Exporter to traverse unexported fields. - // • For SliceIndex, one may be invalid if an element is missing from - // either the x or y slice. - // • For MapIndex, one may be invalid if an entry is missing from - // either the x or y map. 
+ // - For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // - For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // - For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. // // The provided values must not be mutated. Values() (vx, vy reflect.Value) @@ -94,6 +94,7 @@ func (pa Path) Index(i int) PathStep { // The simplified path only contains struct field accesses. // // For example: +// // MyMap.MySlices.MyField func (pa Path) String() string { var ss []string @@ -108,6 +109,7 @@ func (pa Path) String() string { // GoString returns the path to a specific node using Go syntax. // // For example: +// // (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField func (pa Path) GoString() string { var ssPre, ssPost []string @@ -159,7 +161,7 @@ func (ps pathStep) String() string { if ps.typ == nil { return "" } - s := ps.typ.String() + s := value.TypeString(ps.typ, false) if s == "" || strings.ContainsAny(s, "{}\n") { return "root" // Type too simple or complex to print } @@ -282,7 +284,7 @@ type typeAssertion struct { func (ta TypeAssertion) Type() reflect.Type { return ta.typ } func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } -func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } // Transform is a transformation from the parent type to the current type. type Transform struct{ *transform } diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 1ef65ac1db826..2050bf6b46b79 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -7,8 +7,6 @@ package cmp import ( "fmt" "reflect" - - "github.com/google/go-cmp/cmp/internal/value" ) // numContextRecords is the number of surrounding equal records to print. @@ -117,7 +115,7 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out // For leaf nodes, format the value based on the reflect.Values alone. // As a special case, treat equal []byte as a leaf nodes. 
- isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0)) + isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 if v.MaxDepth == 0 || isEqualBytes { switch opts.DiffMode { @@ -248,11 +246,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, pt var isZero bool switch opts.DiffMode { case diffIdentical: - isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() case diffRemoved: - isZero = value.IsZero(r.Value.ValueX) + isZero = r.Value.ValueX.IsZero() case diffInserted: - isZero = value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueY.IsZero() } if isZero { continue diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 287b893588ee1..2ab41fad3fb55 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -16,6 +16,13 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) +var ( + anyType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + byteType = reflect.TypeOf((*byte)(nil)).Elem() +) + type formatValueOptions struct { // AvoidStringer controls whether to avoid calling custom stringer // methods like error.Error or fmt.Stringer.String. @@ -184,7 +191,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) - if value.IsZero(vv) { + if vv.IsZero() { continue // Elide fields with zero values } if len(list) == maxLen { @@ -205,7 +212,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Check whether this is a []byte of text data. - if t.Elem() == reflect.TypeOf(byte(0)) { + if t.Elem() == byteType { b := v.Bytes() isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 68b5c1ae164d9..23e444f62f364 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -104,7 +104,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() isString = true - case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + case t.Kind() == reflect.Slice && t.Elem() == byteType: sx, sy = string(vx.Bytes()), string(vy.Bytes()) isString = true case t.Kind() == reflect.Array: @@ -147,7 +147,10 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }) efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) - isPureLinedText = efficiencyLines < 4*efficiencyBytes + quotedLength := len(strconv.Quote(sx + sy)) + unquotedLength := len(sx) + len(sy) + escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) + isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 } } @@ -171,12 +174,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // differences in a string literal. 
This format is more readable, // but has edge-cases where differences are visually indistinguishable. // This format is avoided under the following conditions: - // • A line starts with `"""` - // • A line starts with "..." - // • A line contains non-printable characters - // • Adjacent different lines differ only by whitespace + // - A line starts with `"""` + // - A line starts with "..." + // - A line contains non-printable characters + // - Adjacent different lines differ only by whitespace // // For example: + // // """ // ... // 3 identical lines // foo @@ -231,7 +235,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} switch t.Kind() { case reflect.String: - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: @@ -326,12 +330,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Kind() { case reflect.String: out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf([]byte(nil)) { + if t != bytesType { out = opts.FormatType(t, out) } } @@ -446,7 +450,6 @@ func (opts formatOptions) formatDiffSlice( // {NumIdentical: 3}, // {NumInserted: 1}, // ] -// func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { var prevMode byte lastStats := func(mode byte) *diffStats { @@ -503,7 +506,6 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) // {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, // {NumIdentical: 63}, // ] -// func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { @@ -548,7 +550,6 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat // {NumRemoved: 9}, // {NumIdentical: 64}, // incremented by 10 // ] -// func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { var ix, iy int // indexes into sequence x and y for i, ds := range groups { diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 0fd46d7ffb6e7..388fcf5712085 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -393,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats { // String prints a humanly-readable summary of coalesced records. // // Example: +// // diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" func (s diffStats) String() string { var ss []string diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go new file mode 100644 index 0000000000000..d7fcbf2865169 --- /dev/null +++ b/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. 
+// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index 60d26bb50c6a9..a57207aeb6fd8 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -12,6 +12,7 @@ import ( "fmt" "io" "strings" + "sync" ) // A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC @@ -33,7 +34,15 @@ const ( Future // Reserved for future definition. ) -var rander = rand.Reader // random function +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) type invalidLengthError struct{ len int } @@ -41,6 +50,12 @@ func (err invalidLengthError) Error() string { return fmt.Sprintf("invalid UUID length: %d", err.len) } +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + // Parse decodes s into a UUID or returns an error. 
Both the standard UUID // forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the @@ -249,3 +264,31 @@ func SetRand(r io.Reader) { } rander = r } + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go index 86160fbd0725f..7697802e4d16b 100644 --- a/vendor/github.com/google/uuid/version4.go +++ b/vendor/github.com/google/uuid/version4.go @@ -27,6 +27,8 @@ func NewString() string { // The strength of the UUIDs is based on the strength of the crypto/rand // package. // +// Uses the randomness pool if it was enabled with EnableRandPool. +// // A note about uniqueness derived from the UUID Wikipedia entry: // // Randomly generated UUIDs have 122 random bits. One's annual risk of being @@ -35,7 +37,10 @@ func NewString() string { // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() (UUID, error) { - return NewRandomFromReader(rander) + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() } // NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. @@ -49,3 +54,23 @@ func NewRandomFromReader(r io.Reader) (UUID, error) { uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 return uuid, nil } + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE b/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go new file mode 100644 index 0000000000000..aecaff59ee8a0 --- /dev/null +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -0,0 +1,182 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). +// +// The signer binary is OS-specific, but exposes a standard set of APIs for the client to use. +package client + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/gob" + "fmt" + "io" + "io/ioutil" + "log" + "net/rpc" + "os" + "os/exec" + + "github.com/googleapis/enterprise-certificate-proxy/client/util" +) + +const signAPI = "EnterpriseCertSigner.Sign" +const certificateChainAPI = "EnterpriseCertSigner.CertificateChain" +const publicKeyAPI = "EnterpriseCertSigner.Public" + +// A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser. +type Connection struct { + io.ReadCloser + io.WriteCloser +} + +// Close closes c's underlying ReadCloser and WriteCloser. +func (c *Connection) Close() error { + rerr := c.ReadCloser.Close() + werr := c.WriteCloser.Close() + if rerr != nil { + return rerr + } + return werr +} + +// If ECP Logging is enabled return true +// Otherwise return false +func enableECPLogging() bool { + if os.Getenv("ENABLE_ENTERPRISE_CERTIFICATE_LOGS") != "" { + return true + } + + log.SetOutput(ioutil.Discard) + return false +} + +func init() { + gob.Register(crypto.SHA256) + gob.Register(&rsa.PSSOptions{}) +} + +// SignArgs contains arguments to a crypto Signer.Sign method. +type SignArgs struct { + Digest []byte // The content to sign. + Opts crypto.SignerOpts // Options for signing, such as Hash identifier. +} + +// Key implements credential.Credential by holding the executed signer subprocess. +type Key struct { + cmd *exec.Cmd // Pointer to the signer subprocess. + client *rpc.Client // Pointer to the rpc client that communicates with the signer subprocess. + publicKey crypto.PublicKey // Public key of loaded certificate. + chain [][]byte // Certificate chain of loaded certificate. +} + +// CertificateChain returns the credential as a raw X509 cert chain. This contains the public key. +func (k *Key) CertificateChain() [][]byte { + return k.chain +} + +// Close closes the RPC connection and kills the signer subprocess. +// Call this to free up resources when the Key object is no longer needed. +func (k *Key) Close() error { + if err := k.cmd.Process.Kill(); err != nil { + return fmt.Errorf("failed to kill signer process: %w", err) + } + // Wait for cmd to exit and release resources. Since the process is forcefully killed, this + // will return a non-nil error (varies by OS), which we will ignore. + k.cmd.Wait() + // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed. + // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault. + if err := k.client.Close(); err.Error() != "close |0: file already closed" { + return fmt.Errorf("failed to close RPC connection: %w", err) + } + return nil +} + +// Public returns the public key for this Key. +func (k *Key) Public() crypto.PublicKey { + return k.publicKey +} + +// Sign signs a message digest, using the specified signer options. 
+func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) { + if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() { + return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size()) + } + err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed) + return +} + +// Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate +// related operations, including signing messages with the private key. +// +// The signer binary path is read from the specified configFilePath, if provided. +// Otherwise, use the default config file path. +// +// The config file also specifies which certificate the signer should use. +func Cred(configFilePath string) (*Key, error) { + enableECPLogging() + if configFilePath == "" { + configFilePath = util.GetDefaultConfigFilePath() + } + enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) + if err != nil { + return nil, err + } + k := &Key{ + cmd: exec.Command(enterpriseCertSignerPath, configFilePath), + } + + // Redirect errors from subprocess to parent process. + k.cmd.Stderr = os.Stderr + + // RPC client will communicate with subprocess over stdin/stdout. + kin, err := k.cmd.StdinPipe() + if err != nil { + return nil, err + } + kout, err := k.cmd.StdoutPipe() + if err != nil { + return nil, err + } + k.client = rpc.NewClient(&Connection{kout, kin}) + + if err := k.cmd.Start(); err != nil { + return nil, fmt.Errorf("starting enterprise cert signer subprocess: %w", err) + } + + if err := k.client.Call(certificateChainAPI, struct{}{}, &k.chain); err != nil { + return nil, fmt.Errorf("failed to retrieve certificate chain: %w", err) + } + + var publicKeyBytes []byte + if err := k.client.Call(publicKeyAPI, struct{}{}, &publicKeyBytes); err != nil { + return nil, fmt.Errorf("failed to retrieve public key: %w", err) + } + + publicKey, err := x509.ParsePKIXPublicKey(publicKeyBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse public key: %w", err) + } + + var ok bool + k.publicKey, ok = publicKey.(crypto.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid public key type: %T", publicKey) + } + + switch pub := k.publicKey.(type) { + case *rsa.PublicKey: + if pub.Size() < 256 { + return nil, fmt.Errorf("RSA modulus size is less than 2048 bits: %v", pub.Size()*8) + } + case *ecdsa.PublicKey: + default: + return nil, fmt.Errorf("unsupported public key type: %v", pub) + } + + return k, nil +} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go new file mode 100644 index 0000000000000..ccef5278a309c --- /dev/null +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go @@ -0,0 +1,71 @@ +// Package util provides helper functions for the client. +package util + +import ( + "encoding/json" + "errors" + "io/ioutil" + "os" + "os/user" + "path/filepath" + "runtime" +) + +const configFileName = "certificate_config.json" + +// EnterpriseCertificateConfig contains parameters for initializing signer. +type EnterpriseCertificateConfig struct { + Libs Libs `json:"libs"` +} + +// Libs specifies the locations of helper libraries. +type Libs struct { + ECP string `json:"ecp"` +} + +// LoadSignerBinaryPath retrieves the path of the signer binary from the config file. 
+func LoadSignerBinaryPath(configFilePath string) (path string, err error) { + jsonFile, err := os.Open(configFilePath) + if err != nil { + return "", err + } + + byteValue, err := ioutil.ReadAll(jsonFile) + if err != nil { + return "", err + } + var config EnterpriseCertificateConfig + err = json.Unmarshal(byteValue, &config) + if err != nil { + return "", err + } + signerBinaryPath := config.Libs.ECP + if signerBinaryPath == "" { + return "", errors.New("signer binary path is missing") + } + return signerBinaryPath, nil +} + +func guessHomeDir() string { + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if v := os.Getenv("HOME"); v != "" { + return v + } + // Else, fall back to user.Current: + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} + +func getDefaultConfigFileDirectory() (directory string) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud") + } + return filepath.Join(guessHomeDir(), ".config/gcloud") +} + +// GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud. +func GetDefaultConfigFilePath() (path string) { + return filepath.Join(getDefaultConfigFileDirectory(), configFileName) +} diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 0e643a05b571c..d88960b7ef17d 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.4.0" + "v2": "2.7.0" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index b42ace44c9816..b75170f2227cc 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,34 @@ # Changelog +## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) + + +### Features + +* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6)) +* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f)) + +## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) + + +### Features + +* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3)) + +## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04) + + +### Bug Fixes + +* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23)) + +## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04) + + +### Features + +* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb)) + ## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09) diff --git 
a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index 7d0128a0cd537..aa6be1304f176 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -41,6 +41,7 @@ import ( "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) // ErrDetails holds the google/rpc/error_details.proto messages. @@ -60,6 +61,30 @@ type ErrDetails struct { Unknown []interface{} } +// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages. +var ErrMessageNotFound = errors.New("message not found") + +// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the +// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type, +// the content of the message is copied to the provided message. +// +// ExtractProtoMessage will return ErrMessageNotFound if there are no message matching the +// protocol buffer type of the provided message. +func (e ErrDetails) ExtractProtoMessage(v proto.Message) error { + if v == nil { + return ErrMessageNotFound + } + for _, elem := range e.Unknown { + if elemProto, ok := elem.(proto.Message); ok { + if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() { + proto.Merge(v, elemProto) + return nil + } + } + } + return ErrMessageNotFound +} + func (e ErrDetails) String() string { var d strings.Builder if e.ErrorInfo != nil { @@ -208,30 +233,49 @@ func (a *APIError) Metadata() map[string]string { } -// FromError parses a Status error or a googleapi.Error and builds an APIError. -func FromError(err error) (*APIError, bool) { - if err == nil { - return nil, false - } - - ae := APIError{err: err} +// setDetailsFromError parses a Status error or a googleapi.Error +// and sets status and details or httpErr and details, respectively. +// It returns false if neither Status nor googleapi.Error can be parsed. +func (a *APIError) setDetailsFromError(err error) bool { st, isStatus := status.FromError(err) var herr *googleapi.Error isHTTPErr := errors.As(err, &herr) switch { case isStatus: - ae.status = st - ae.details = parseDetails(st.Details()) + a.status = st + a.details = parseDetails(st.Details()) case isHTTPErr: - ae.httpErr = herr - ae.details = parseHTTPDetails(herr) + a.httpErr = herr + a.details = parseHTTPDetails(herr) default: - return nil, false + return false } + return true +} - return &ae, true +// FromError parses a Status error or a googleapi.Error and builds an +// APIError, wrapping the provided error in the new APIError. It +// returns false if neither Status nor googleapi.Error can be parsed. +func FromError(err error) (*APIError, bool) { + return ParseError(err, true) +} +// ParseError parses a Status error or a googleapi.Error and builds an +// APIError. If wrap is true, it wraps the error in the new APIError. +// It returns false if neither Status nor googleapi.Error can be parsed. 
+func ParseError(err error, wrap bool) (*APIError, bool) { + if err == nil { + return nil, false + } + ae := APIError{} + if wrap { + ae = APIError{err: err} + } + if !ae.setDetailsFromError(err) { + return nil, false + } + return &ae, true } // parseDetails accepts a slice of interface{} that should be backed by some diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go new file mode 100644 index 0000000000000..e4b03f161d823 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go @@ -0,0 +1,256 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.17.3 +// source: custom_error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Error code for `CustomError`. +type CustomError_CustomErrorCode int32 + +const ( + // Default error. + CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0 + // Too many foo. + CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1 + // Not enough foo. + CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2 + // Catastrophic error. + CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3 +) + +// Enum value maps for CustomError_CustomErrorCode. +var ( + CustomError_CustomErrorCode_name = map[int32]string{ + 0: "CUSTOM_ERROR_CODE_UNSPECIFIED", + 1: "TOO_MANY_FOO", + 2: "NOT_ENOUGH_FOO", + 3: "UNIVERSE_WAS_DESTROYED", + } + CustomError_CustomErrorCode_value = map[string]int32{ + "CUSTOM_ERROR_CODE_UNSPECIFIED": 0, + "TOO_MANY_FOO": 1, + "NOT_ENOUGH_FOO": 2, + "UNIVERSE_WAS_DESTROYED": 3, + } +) + +func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode { + p := new(CustomError_CustomErrorCode) + *p = x + return p +} + +func (x CustomError_CustomErrorCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor { + return file_custom_error_proto_enumTypes[0].Descriptor() +} + +func (CustomError_CustomErrorCode) Type() protoreflect.EnumType { + return &file_custom_error_proto_enumTypes[0] +} + +func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead. 
+func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0, 0} +} + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +type CustomError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error code specific to the custom API being invoked. + Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"` + // Name of the failed entity. + Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` + // Message that describes the error. + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *CustomError) Reset() { + *x = CustomError{} + if protoimpl.UnsafeEnabled { + mi := &file_custom_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomError) ProtoMessage() {} + +func (x *CustomError) ProtoReflect() protoreflect.Message { + mi := &file_custom_error_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomError.ProtoReflect.Descriptor instead. +func (*CustomError) Descriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0} +} + +func (x *CustomError) GetCode() CustomError_CustomErrorCode { + if x != nil { + return x.Code + } + return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED +} + +func (x *CustomError) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *CustomError) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_custom_error_proto protoreflect.FileDescriptor + +var file_custom_error_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 
0x4f, 0x4f, 0x5f, 0x4d, 0x41, + 0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f, + 0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, + 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53, + 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_custom_error_proto_rawDescOnce sync.Once + file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc +) + +func file_custom_error_proto_rawDescGZIP() []byte { + file_custom_error_proto_rawDescOnce.Do(func() { + file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData) + }) + return file_custom_error_proto_rawDescData +} + +var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_custom_error_proto_goTypes = []interface{}{ + (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode + (*CustomError)(nil), // 1: error.CustomError +} +var file_custom_error_proto_depIdxs = []int32{ + 0, // 0: error.CustomError.code:type_name -> error.CustomError.CustomErrorCode + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_custom_error_proto_init() } +func file_custom_error_proto_init() { + if File_custom_error_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_custom_error_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_custom_error_proto_goTypes, + DependencyIndexes: file_custom_error_proto_depIdxs, + EnumInfos: file_custom_error_proto_enumTypes, + MessageInfos: file_custom_error_proto_msgTypes, + }.Build() + File_custom_error_proto = out.File + file_custom_error_proto_rawDesc = nil + file_custom_error_proto_goTypes = nil + file_custom_error_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto new file mode 100644 index 0000000000000..21678ae65c99e --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto @@ -0,0 +1,50 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package error; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +message CustomError { + + // Error code for `CustomError`. + enum CustomErrorCode { + // Default error. + CUSTOM_ERROR_CODE_UNSPECIFIED = 0; + + // Too many foo. + TOO_MANY_FOO = 1; + + // Not enough foo. + NOT_ENOUGH_FOO = 2; + + // Catastrophic error. + UNIVERSE_WAS_DESTROYED = 3; + + } + + // Error code specific to the custom API being invoked. + CustomErrorCode code = 1; + + // Name of the failed entity. + string entity = 2; + + // Message that describes the error. + string error_message = 3; +} diff --git a/vendor/github.com/googleapis/gax-go/v2/content_type.go b/vendor/github.com/googleapis/gax-go/v2/content_type.go new file mode 100644 index 0000000000000..1b53d0a3ac1a8 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/content_type.go @@ -0,0 +1,112 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "io" + "io/ioutil" + "net/http" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. + err error // set to any error encountered while reading bytes to be sniffed. 
+ + ctype string // set on first sniff. + sniffed bool // set to true on first sniff. +} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. + _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. + return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +} + +// DetermineContentType determines the content type of the supplied reader. +// The content of media will be sniffed to determine the content type. +// After calling DetectContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned. +func DetermineContentType(media io.Reader) (io.Reader, string) { + // For backwards compatibility, allow clients to set content + // type by providing a ContentTyper for media. + // Note: This is an anonymous interface definition copied from googleapi.ContentTyper. + if typer, ok := media.(interface { + ContentType() string + }); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index bf272a5045c26..0ba5da1dd1eaf 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "2.4.0" +const Version = "2.7.0" diff --git a/vendor/github.com/stretchr/objx/.travis.yml b/vendor/github.com/stretchr/objx/.travis.yml deleted file mode 100644 index cde6eb2affdf2..0000000000000 --- a/vendor/github.com/stretchr/objx/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -language: go -go: - - "1.10.x" - - "1.11.x" - - "1.12.x" - - master - -matrix: - allow_failures: - - go: master -fast_finish: true - -env: - global: - - CC_TEST_REPORTER_ID=68feaa3410049ce73e145287acbcdacc525087a30627f96f04e579e75bd71c00 - -before_script: - - curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter - - chmod +x ./cc-test-reporter - - ./cc-test-reporter before-build - -install: - - curl -sL https://taskfile.dev/install.sh | sh - -script: - - diff -u <(echo -n) <(./bin/task lint) - - ./bin/task test-coverage - -after_script: - - ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml index a749ac5492e5b..7746f516da205 100644 --- a/vendor/github.com/stretchr/objx/Taskfile.yml +++ b/vendor/github.com/stretchr/objx/Taskfile.yml @@ -25,6 +25,6 @@ tasks: - go test -race ./... test-coverage: - desc: Runs go tests and calucates test coverage + desc: Runs go tests and calculates test coverage cmds: - go test -race -coverprofile=c.out ./... diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go index 676316281154e..4c6045588637a 100644 --- a/vendor/github.com/stretchr/objx/accessors.go +++ b/vendor/github.com/stretchr/objx/accessors.go @@ -1,6 +1,7 @@ package objx import ( + "reflect" "regexp" "strconv" "strings" @@ -16,11 +17,18 @@ const ( // arrayAccesRegexString is the regex used to extract the array number // from the access path arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + + // mapAccessRegexString is the regex used to extract the map key + // from the access path + mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$` ) // arrayAccesRegex is the compiled arrayAccesRegexString var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) +// mapAccessRegex is the compiled mapAccessRegexString +var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) + // Get gets the value using the specified selector and // returns it inside a new Obj object. // @@ -70,15 +78,53 @@ func getIndex(s string) (int, string) { return -1, s } +// getKey returns the key which is held in s by two brackets. +// It also returns the next selector. +func getKey(s string) (string, string) { + selSegs := strings.SplitN(s, PathSeparator, 2) + thisSel := selSegs[0] + nextSel := "" + + if len(selSegs) > 1 { + nextSel = selSegs[1] + } + + mapMatches := mapAccessRegex.FindStringSubmatch(s) + if len(mapMatches) > 0 { + if _, err := strconv.Atoi(mapMatches[2]); err != nil { + thisSel = mapMatches[1] + nextSel = "[" + mapMatches[2] + "]" + mapMatches[3] + + if thisSel == "" { + thisSel = mapMatches[2] + nextSel = mapMatches[3] + } + + if nextSel == "" { + selSegs = []string{"", ""} + } else if nextSel[0] == '.' { + nextSel = nextSel[1:] + } + } + } + + return thisSel, nextSel +} + // access accesses the object using the selector and performs the // appropriate action. 
func access(current interface{}, selector string, value interface{}, isSet bool) interface{} { - selSegs := strings.SplitN(selector, PathSeparator, 2) - thisSel := selSegs[0] - index := -1 + thisSel, nextSel := getKey(selector) - if strings.Contains(thisSel, "[") { + indexes := []int{} + for strings.Contains(thisSel, "[") { + prevSel := thisSel + index := -1 index, thisSel = getIndex(thisSel) + indexes = append(indexes, index) + if prevSel == thisSel { + break + } } if curMap, ok := current.(Map); ok { @@ -88,13 +134,17 @@ func access(current interface{}, selector string, value interface{}, isSet bool) switch current.(type) { case map[string]interface{}: curMSI := current.(map[string]interface{}) - if len(selSegs) <= 1 && isSet { + if nextSel == "" && isSet { curMSI[thisSel] = value return nil } _, ok := curMSI[thisSel].(map[string]interface{}) - if (curMSI[thisSel] == nil || !ok) && index == -1 && isSet { + if !ok { + _, ok = curMSI[thisSel].(Map) + } + + if (curMSI[thisSel] == nil || !ok) && len(indexes) == 0 && isSet { curMSI[thisSel] = map[string]interface{}{} } @@ -102,18 +152,46 @@ func access(current interface{}, selector string, value interface{}, isSet bool) default: current = nil } + // do we need to access the item of an array? - if index > -1 { - if array, ok := current.([]interface{}); ok { - if index < len(array) { - current = array[index] - } else { - current = nil + if len(indexes) > 0 { + num := len(indexes) + for num > 0 { + num-- + index := indexes[num] + indexes = indexes[:num] + if array, ok := interSlice(current); ok { + if index < len(array) { + current = array[index] + } else { + current = nil + break + } } } } - if len(selSegs) > 1 { - current = access(current, selSegs[1], value, isSet) + + if nextSel != "" { + current = access(current, nextSel, value, isSet) } return current } + +func interSlice(slice interface{}) ([]interface{}, bool) { + if array, ok := slice.([]interface{}); ok { + return array, ok + } + + s := reflect.ValueOf(slice) + if s.Kind() != reflect.Slice { + return nil, false + } + + ret := make([]interface{}, s.Len()) + + for i := 0; i < s.Len(); i++ { + ret[i] = s.Index(i).Interface() + } + + return ret, true +} diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go index 95149c06a6d31..a64712a08b50b 100644 --- a/vendor/github.com/stretchr/objx/map.go +++ b/vendor/github.com/stretchr/objx/map.go @@ -92,6 +92,18 @@ func MustFromJSON(jsonString string) Map { return o } +// MustFromJSONSlice creates a new slice of Map containing the data specified in the +// jsonString. Works with jsons with a top level array +// +// Panics if the JSON is invalid. +func MustFromJSONSlice(jsonString string) []Map { + slice, err := FromJSONSlice(jsonString) + if err != nil { + panic("objx: MustFromJSONSlice failed with error: " + err.Error()) + } + return slice +} + // FromJSON creates a new Map containing the data specified in the // jsonString. 
// @@ -102,45 +114,20 @@ func FromJSON(jsonString string) (Map, error) { if err != nil { return Nil, err } - m.tryConvertFloat64() return m, nil } -func (m Map) tryConvertFloat64() { - for k, v := range m { - switch v.(type) { - case float64: - f := v.(float64) - if float64(int(f)) == f { - m[k] = int(f) - } - case map[string]interface{}: - t := New(v) - t.tryConvertFloat64() - m[k] = t - case []interface{}: - m[k] = tryConvertFloat64InSlice(v.([]interface{})) - } - } -} - -func tryConvertFloat64InSlice(s []interface{}) []interface{} { - for k, v := range s { - switch v.(type) { - case float64: - f := v.(float64) - if float64(int(f)) == f { - s[k] = int(f) - } - case map[string]interface{}: - t := New(v) - t.tryConvertFloat64() - s[k] = t - case []interface{}: - s[k] = tryConvertFloat64InSlice(v.([]interface{})) - } +// FromJSONSlice creates a new slice of Map containing the data specified in the +// jsonString. Works with jsons with a top level array +// +// Returns an error if the JSON is invalid. +func FromJSONSlice(jsonString string) ([]Map, error) { + var slice []Map + err := json.Unmarshal([]byte(jsonString), &slice) + if err != nil { + return nil, err } - return s + return slice, nil } // FromBase64 creates a new Obj containing the data specified diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go index 9859b407f0290..45850456e17f8 100644 --- a/vendor/github.com/stretchr/objx/type_specific_codegen.go +++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -385,6 +385,11 @@ func (v *Value) Int(optionalDefault ...int) int { if s, ok := v.data.(int); ok { return s } + if s, ok := v.data.(float64); ok { + if float64(int(s)) == s { + return int(s) + } + } if len(optionalDefault) == 1 { return optionalDefault[0] } @@ -395,6 +400,11 @@ func (v *Value) Int(optionalDefault ...int) int { // // Panics if the object is not a int. func (v *Value) MustInt() int { + if s, ok := v.data.(float64); ok { + if float64(int(s)) == s { + return int(s) + } + } return v.data.(int) } diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 3bb22a9718eb8..95d8e59da69bf 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -1,6 +1,7 @@ package assert import ( + "bytes" "fmt" "reflect" "time" @@ -32,7 +33,8 @@ var ( stringType = reflect.TypeOf("") - timeType = reflect.TypeOf(time.Time{}) + timeType = reflect.TypeOf(time.Time{}) + bytesType = reflect.TypeOf([]byte{}) ) func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { @@ -323,6 +325,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) } + case reflect.Slice: + { + // We only care about the []byte type. + if !canConvert(obj1Value, bytesType) { + break + } + + // []byte can be compared! 
+ bytesObj1, ok := obj1.([]byte) + if !ok { + bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte) + + } + bytesObj2, ok := obj2.([]byte) + if !ok { + bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) + } + + return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + } } return compareEqual, false diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 27e2420ed2e76..7880b8f943330 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -736,6 +736,16 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...) +} + // YAMLEqf asserts that two YAML strings are equivalent. func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index d9ea368d0a355..339515b8bfb9a 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1461,6 +1461,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta return WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). +// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. 
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 0357b2231a2cd..fa1245b189738 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "os" + "path/filepath" "reflect" "regexp" "runtime" @@ -144,7 +145,8 @@ func CallerInfo() []string { if len(parts) > 1 { dir := parts[len(parts)-2] if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + path, _ := filepath.Abs(file) + callers = append(callers, fmt.Sprintf("%s:%d", path, line)) } } @@ -563,16 +565,17 @@ func isEmpty(object interface{}) bool { switch objValue.Kind() { // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: if objValue.IsNil() { return true } deref := objValue.Elem().Interface() return isEmpty(deref) - // for all other types, compare against the zero value + // for all other types, compare against the zero value + // array types are empty when they match their zero-initialized state default: zero := reflect.Zero(objValue.Type()) return reflect.DeepEqual(object, zero.Interface()) @@ -815,7 +818,6 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -825,14 +827,32 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + } + } + + return true + } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() ok, found := containsElement(list, element) @@ -859,7 +879,6 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) 
} - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -869,14 +888,32 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) + } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() ok, found := containsElement(list, element) @@ -1109,6 +1146,27 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, return true } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if end.Before(start) { + return Fail(t, "Start should be before end", msgAndArgs...) + } + + if actual.Before(start) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...) + } else if actual.After(end) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...) + } + + return true +} + func toFloat(x interface{}) (float64, bool) { var xf float64 xok := true diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index 853da6cce2de9..f0af8246cfc93 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -70,6 +70,9 @@ type Call struct { // if the PanicMsg is set to a non nil string the function call will panic // irrespective of other settings PanicMsg *string + + // Calls which must be satisfied before this call can be + requires []*Call } func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { @@ -199,6 +202,64 @@ func (c *Call) On(methodName string, arguments ...interface{}) *Call { return c.Parent.On(methodName, arguments...) } +// Unset removes a mock handler from being called. +// test.On("func", mock.Anything).Unset() +func (c *Call) Unset() *Call { + var unlockOnce sync.Once + + for _, arg := range c.Arguments { + if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { + panic(fmt.Sprintf("cannot use Func in expectations. 
Use mock.AnythingOfType(\"%T\")", arg)) + } + } + + c.lock() + defer unlockOnce.Do(c.unlock) + + foundMatchingCall := false + + for i, call := range c.Parent.ExpectedCalls { + if call.Method == c.Method { + _, diffCount := call.Arguments.Diff(c.Arguments) + if diffCount == 0 { + foundMatchingCall = true + // Remove from ExpectedCalls + c.Parent.ExpectedCalls = append(c.Parent.ExpectedCalls[:i], c.Parent.ExpectedCalls[i+1:]...) + } + } + } + + if !foundMatchingCall { + unlockOnce.Do(c.unlock) + c.Parent.fail("\n\nmock: Could not find expected call\n-----------------------------\n\n%s\n\n", + callString(c.Method, c.Arguments, true), + ) + } + + return c +} + +// NotBefore indicates that the mock should only be called after the referenced +// calls have been called as expected. The referenced calls may be from the +// same mock instance and/or other mock instances. +// +// Mock.On("Do").Return(nil).Notbefore( +// Mock.On("Init").Return(nil) +// ) +func (c *Call) NotBefore(calls ...*Call) *Call { + c.lock() + defer c.unlock() + + for _, call := range calls { + if call.Parent == nil { + panic("not before calls must be created with Mock.On()") + } + } + + c.requires = append(c.requires, calls...) + return c +} + // Mock is the workhorse used to track activity on another object. // For an example of its usage, refer to the "Example Usage" section at the top // of this document. @@ -232,7 +293,6 @@ func (m *Mock) String() string { // TestData holds any data that might be useful for testing. Testify ignores // this data completely allowing you to do whatever you like with it. func (m *Mock) TestData() objx.Map { - if m.testData == nil { m.testData = make(objx.Map) } @@ -354,7 +414,6 @@ func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, } func callString(method string, arguments Arguments, includeArgumentValues bool) string { - var argValsString string if includeArgumentValues { var argVals []string @@ -378,10 +437,10 @@ func (m *Mock) Called(arguments ...interface{}) Arguments { panic("Couldn't get the caller information") } functionPath := runtime.FuncForPC(pc).Name() - //Next four lines are required to use GCCGO function naming conventions. - //For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock - //uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree - //With GCCGO we need to remove interface information starting from pN
. + // Next four lines are required to use GCCGO function naming conventions. + // For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock + // uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree + // With GCCGO we need to remove interface information starting from pN
. re := regexp.MustCompile("\\.pN\\d+_") if re.MatchString(functionPath) { functionPath = re.Split(functionPath, -1)[0] @@ -397,7 +456,7 @@ func (m *Mock) Called(arguments ...interface{}) Arguments { // If Call.WaitFor is set, blocks until the channel is closed or receives a message. func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments { m.mutex.Lock() - //TODO: could combine expected and closes in single loop + // TODO: could combine expected and closes in single loop found, call := m.findExpectedCall(methodName, arguments...) if found < 0 { @@ -427,6 +486,25 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen } } + for _, requirement := range call.requires { + if satisfied, _ := requirement.Parent.checkExpectation(requirement); !satisfied { + m.mutex.Unlock() + m.fail("mock: Unexpected Method Call\n-----------------------------\n\n%s\n\nMust not be called before%s:\n\n%s", + callString(call.Method, call.Arguments, true), + func() (s string) { + if requirement.totalCalls > 0 { + s = " another call of" + } + if call.Parent != requirement.Parent { + s += " method from another mock instance" + } + return + }(), + callString(requirement.Method, requirement.Arguments, true), + ) + } + } + if call.Repeatability == 1 { call.Repeatability = -1 } else if call.Repeatability > 1 { @@ -484,9 +562,9 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { h.Helper() } for _, obj := range testObjects { - if m, ok := obj.(Mock); ok { + if m, ok := obj.(*Mock); ok { t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") - obj = &m + obj = m } m := obj.(assertExpectationser) if !m.AssertExpectations(t) { @@ -503,34 +581,36 @@ func (m *Mock) AssertExpectations(t TestingT) bool { if h, ok := t.(tHelper); ok { h.Helper() } + m.mutex.Lock() defer m.mutex.Unlock() - var somethingMissing bool var failedExpectations int // iterate through each expectation expectedCalls := m.expectedCalls() for _, expectedCall := range expectedCalls { - if !expectedCall.optional && !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { - somethingMissing = true + satisfied, reason := m.checkExpectation(expectedCall) + if !satisfied { failedExpectations++ - t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) - } else { - if expectedCall.Repeatability > 0 { - somethingMissing = true - failedExpectations++ - t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) - } else { - t.Logf("PASS:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) - } } + t.Logf(reason) } - if somethingMissing { + if failedExpectations != 0 { t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) } - return !somethingMissing + return failedExpectations == 0 +} + +func (m *Mock) checkExpectation(call *Call) (bool, string) { + if !call.optional && !m.methodWasCalled(call.Method, call.Arguments) && call.totalCalls == 0 { + return false, fmt.Sprintf("FAIL:\t%s(%s)\n\t\tat: %s", call.Method, call.Arguments.String(), call.callerInfo) + } + if call.Repeatability > 0 { + return false, fmt.Sprintf("FAIL:\t%s(%s)\n\t\tat: %s", call.Method, call.Arguments.String(), call.callerInfo) 
+ } + return true, fmt.Sprintf("PASS:\t%s(%s)", call.Method, call.Arguments.String()) } // AssertNumberOfCalls asserts that the method was called expectedCalls times. @@ -781,12 +861,12 @@ func (args Arguments) Is(objects ...interface{}) bool { // // Returns the diff string and number of differences found. func (args Arguments) Diff(objects []interface{}) (string, int) { - //TODO: could return string as error and nil for No difference + // TODO: could return string as error and nil for No difference - var output = "\n" + output := "\n" var differences int - var maxArgCount = len(args) + maxArgCount := len(args) if len(objects) > maxArgCount { maxArgCount = len(objects) } @@ -812,21 +892,28 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { } if matcher, ok := expected.(argumentMatcher); ok { - if matcher.Matches(actual) { + var matches bool + func() { + defer func() { + if r := recover(); r != nil { + actualFmt = fmt.Sprintf("panic in argument matcher: %v", r) + } + }() + matches = matcher.Matches(actual) + }() + if matches { output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher) } else { differences++ output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher) } } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { - // type checking if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { // not match differences++ output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*IsTypeArgument)(nil)) { t := expected.(*IsTypeArgument).t if reflect.TypeOf(t) != reflect.TypeOf(actual) { @@ -834,7 +921,6 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt) } } else { - // normal checking if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { @@ -854,7 +940,6 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { } return output, differences - } // Assert compares the arguments with the specified objects and fails if @@ -876,7 +961,6 @@ func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { t.Errorf("%sArguments do not match.", assert.CallerInfo()) return false - } // String gets the argument at the specified index. Panics if there is no argument, or @@ -885,7 +969,6 @@ func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { // If no index is provided, String() returns a complete string representation // of the arguments. 
func (args Arguments) String(indexOrNil ...int) string { - if len(indexOrNil) == 0 { // normal String() method - return a string representation of the args var argsStr []string @@ -895,7 +978,7 @@ func (args Arguments) String(indexOrNil ...int) string { return strings.Join(argsStr, ",") } else if len(indexOrNil) == 1 { // Index has been specified - get the argument at that index - var index = indexOrNil[0] + index := indexOrNil[0] var s string var ok bool if s, ok = args.Get(index).(string); !ok { @@ -905,7 +988,6 @@ func (args Arguments) String(indexOrNil ...int) string { } panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) - } // Int gets the argument at the specified index. Panics if there is no argument, or diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 59c48277ac6d3..880853f5a2c59 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -1864,6 +1864,32 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim t.FailNow() } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRange(t, actual, start, end, msgAndArgs...) { + return + } + t.FailNow() +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRangef(t, actual, start, end, msg, args...) { + return + } + t.FailNow() +} + // YAMLEq asserts that two YAML strings are equivalent. func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 5bb07c89c68b7..960bf6f2cabfe 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -1462,6 +1462,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). +// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). 
+// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile index b3ce3df3032df..d896edc996813 100644 --- a/vendor/go.opencensus.io/Makefile +++ b/vendor/go.opencensus.io/Makefile @@ -91,7 +91,7 @@ embedmd: .PHONY: install-tools install-tools: - go get -u golang.org/x/lint/golint - go get -u golang.org/x/tools/cmd/cover - go get -u golang.org/x/tools/cmd/goimports - go get -u github.com/rakyll/embedmd + go install golang.org/x/lint/golint@latest + go install golang.org/x/tools/cmd/cover@latest + go install golang.org/x/tools/cmd/goimports@latest + go install github.com/rakyll/embedmd@latest diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go index e5e4b4368c1a3..11e31f421c5d8 100644 --- a/vendor/go.opencensus.io/opencensus.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.23.0" + return "0.24.0" } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go index 49fde3d8c8262..fb3c19d6b646d 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -28,6 +28,7 @@ var ( ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) + ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless) ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) ) @@ -70,6 +71,14 @@ var ( Aggregation: view.Count(), } + ClientStartedRPCsView = &view.View{ + Measure: ClientStartedRPCs, + Name: "grpc.io/client/started_rpcs", + Description: "Number of started client RPCs.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: view.Count(), + } + ClientSentMessagesPerRPCView = &view.View{ Measure: ClientSentMessagesPerRPC, Name: "grpc.io/client/sent_messages_per_rpc", diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go index b2059824a85fb..fe0e971086e6a 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -27,6 +27,7 @@ var ( ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", 
"Total bytes received across all messages per RPC.", stats.UnitBytes) ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) + ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless) ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) ) @@ -73,6 +74,14 @@ var ( Aggregation: view.Count(), } + ServerStartedRPCsView = &view.View{ + Measure: ServerStartedRPCs, + Name: "grpc.io/server/started_rpcs", + Description: "Number of started server RPCs.", + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: view.Count(), + } + ServerReceivedMessagesPerRPCView = &view.View{ Name: "grpc.io/server/received_messages_per_rpc", Description: "Distribution of messages received count per RPC, by method.", diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go index 89cac9c4ec0bb..9cb27320ca132 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go @@ -82,8 +82,10 @@ func methodName(fullname string) string { // statsHandleRPC processes the RPC events. func statsHandleRPC(ctx context.Context, s stats.RPCStats) { switch st := s.(type) { - case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: + case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: // do nothing for client + case *stats.Begin: + handleRPCBegin(ctx, st) case *stats.OutPayload: handleRPCOutPayload(ctx, st) case *stats.InPayload: @@ -95,6 +97,25 @@ func statsHandleRPC(ctx context.Context, s stats.RPCStats) { } } +func handleRPCBegin(ctx context.Context, s *stats.Begin) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + } + + if s.IsClient() { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), + ocstats.WithMeasurements(ClientStartedRPCs.M(1))) + } else { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), + ocstats.WithMeasurements(ServerStartedRPCs.M(1))) + } +} + func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { d, ok := ctx.Value(rpcDataKey).(*rpcData) if !ok { diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index c7ea642357265..f7c8434be06cb 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -31,14 +31,14 @@ import ( // Handler is an http.Handler wrapper to instrument your HTTP server with // OpenCensus. It supports both stats and tracing. // -// Tracing +// # Tracing // // This handler is aware of the incoming request's span, reading it from request // headers as configured using the Propagation field. // The extracted span can be accessed from the incoming request's // context. // -// span := trace.FromContext(r.Context()) +// span := trace.FromContext(r.Context()) // // The server span will be automatically ended at the end of ServeHTTP. 
type Handler struct { @@ -224,7 +224,9 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) { } // wrappedResponseWriter returns a wrapped version of the original -// ResponseWriter and only implements the same combination of additional +// +// ResponseWriter and only implements the same combination of additional +// // interfaces as the original. // This implementation is based on https://github.com/felixge/httpsnoop. func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go index 00d473ee02989..31477a464fd9a 100644 --- a/vendor/go.opencensus.io/stats/doc.go +++ b/vendor/go.opencensus.io/stats/doc.go @@ -19,7 +19,7 @@ Package stats contains support for OpenCensus stats recording. OpenCensus allows users to create typed measures, record measurements, aggregate the collected data, and export the aggregated data. -Measures +# Measures A measure represents a type of data point to be tracked and recorded. For example, latency, request Mb/s, and response Mb/s are measures @@ -33,7 +33,7 @@ Libraries can define and export measures. Application authors can then create views and collect and break down measures by the tags they are interested in. -Recording measurements +# Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms @@ -49,7 +49,7 @@ Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. -Exemplars +# Exemplars For a given recorded measurement, the associated exemplar is a diagnostic map that gives more information about the measurement. @@ -64,6 +64,5 @@ then the trace span will be added to the exemplar associated with the measuremen When exported to a supporting back end, you should be able to easily navigate to example traces that fell into each bucket in the Distribution. - */ package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go index 36935e629b66c..436dc791f834c 100644 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -21,5 +21,11 @@ import ( // DefaultRecorder will be called for each Record call. var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) +// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but +// avoids interface{} conversion. +// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, +// but is interface{} here to avoid import loops +var MeasurementRecorder interface{} + // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index 2b97283462e28..8b5b99803ce3e 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -86,10 +86,29 @@ func createRecordOption(ros ...Options) *recordOptions { return o } +type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) + // Record records one or multiple measurements with the same context at once. 
// If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { - RecordWithOptions(ctx, WithMeasurements(ms...)) + // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality + // (RecordOptions) we can reduce some allocations to speed up this hot path + if len(ms) == 0 { + return + } + recorder := internal.MeasurementRecorder.(measurementRecorder) + record := false + for _, m := range ms { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return + } + recorder(tag.FromContext(ctx), ms, nil) + return } // RecordWithTags records one or multiple measurements at once. diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go index 748bd568cda02..61f72d20da335 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -90,9 +90,9 @@ func Sum() *Aggregation { // // If len(bounds) >= 2 then the boundaries for bucket index i are: // -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length // // If len(bounds) is 0 then there is no histogram associated with the // distribution. There will be a single bucket with boundaries diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index ac22c93a2b572..bcd6e08c7481c 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -59,8 +59,15 @@ func (c *collector) clearRows() { // encodeWithKeys encodes the map by using values // only associated with the keys provided. func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + // Compute the buffer length we will need ahead of time to avoid resizing later + reqLen := 0 + for _, k := range keys { + s, _ := m.Value(k) + // We will store each key + its length + reqLen += len(s) + 1 + } vb := &tagencoding.Values{ - Buffer: make([]byte, len(keys)), + Buffer: make([]byte, reqLen), } for _, k := range keys { v, _ := m.Value(k) diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go index 7bbedfe1ff23a..60bf0e3925406 100644 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -34,7 +34,7 @@ // Libraries can define views but it is recommended that in most cases registering // views be left up to applications. // -// Exporting +// # Exporting // // Collected and aggregated data can be exported to a metric collection // backend by registering its exporter. 
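The stats/record.go hunk above gives stats.Record an allocation-free fast path: when no RecordOptions are involved it checks measure subscriptions directly and hands the measurements to the new internal.MeasurementRecorder hook (wired up by the view worker in the next file). A minimal sketch of the call pattern that benefits; the measure and view names below are illustrative and not part of this diff.

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// requestCount is an example measure; Record is effectively a no-op until a
// registered view subscribes to it, which the new fast path detects early.
var requestCount = stats.Int64("example.com/measures/request_count", "Number of requests", stats.UnitDimensionless)

func main() {
	if err := view.Register(&view.View{
		Name:        "example.com/views/request_count",
		Description: "Count of requests",
		Measure:     requestCount,
		Aggregation: view.Count(),
	}); err != nil {
		log.Fatal(err)
	}

	// This call now routes through the measurement-specific recorder rather
	// than the generic interface{}-based DefaultRecorder.
	stats.Record(context.Background(), requestCount.M(1))
}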
diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 6e8d18b7f6d3a..6a79cd8a34c30 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -33,6 +33,7 @@ func init() { defaultWorker = NewMeter().(*worker) go defaultWorker.start() internal.DefaultRecorder = record + internal.MeasurementRecorder = recordMeasurement } type measureRef struct { @@ -199,11 +200,21 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { defaultWorker.Record(tags, ms, attachments) } +func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { + defaultWorker.recordMeasurement(tags, ms, attachments) +} + // Record records a set of measurements ms associated with the given tags and attachments. func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + w.recordMeasurement(tags, ms.([]stats.Measurement), attachments) +} + +// recordMeasurement records a set of measurements ms associated with the given tags and attachments. +// This is the same as Record but without an interface{} type to avoid allocations +func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { req := &recordReq{ tm: tags, - ms: ms.([]stats.Measurement), + ms: ms, attachments: attachments, t: time.Now(), } @@ -221,6 +232,11 @@ func SetReportingPeriod(d time.Duration) { defaultWorker.SetReportingPeriod(d) } +// Stop stops the default worker. +func Stop() { + defaultWorker.Stop() +} + // SetReportingPeriod sets the interval between reporting aggregated views in // the program. If duration is less than or equal to zero, it enables the // default behavior. @@ -281,7 +297,7 @@ func (w *worker) start() { case <-w.quit: w.timer.Stop() close(w.c) - w.done <- true + close(w.done) return } } @@ -290,8 +306,11 @@ func (w *worker) start() { func (w *worker) Stop() { prodMgr := metricproducer.GlobalManager() prodMgr.DeleteProducer(w) - - w.quit <- true + select { + case <-w.quit: + default: + close(w.quit) + } <-w.done } diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go index b34d95e34a2cf..8fb17226fe3c5 100644 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.9 // +build go1.9 package tag diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go index 83adbce56b72a..e28cf13cde97a 100644 --- a/vendor/go.opencensus.io/tag/profile_not19.go +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package tag diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go index 04b1ee4f38eab..7a1616a55c5e3 100644 --- a/vendor/go.opencensus.io/trace/doc.go +++ b/vendor/go.opencensus.io/trace/doc.go @@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing. The following assumes a basic familiarity with OpenCensus concepts. See http://opencensus.io - -Exporting Traces +# Exporting Traces To export collected tracing data, register at least one exporter. You can use one of the provided exporters or write your own. 
- trace.RegisterExporter(exporter) + trace.RegisterExporter(exporter) By default, traces will be sampled relatively rarely. To change the sampling frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) Be careful about using trace.AlwaysSample in a production application with significant traffic: a new trace will be started and exported for every request. -Adding Spans to a Trace +# Adding Spans to a Trace A trace consists of a tree of spans. In Go, the current span is carried in a context.Context. @@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F this to work, the function must take a context.Context as a parameter. Add these two lines to the top of the function: - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() StartSpan will create a new top-level span if the context doesn't contain another span, otherwise it will create a child span. diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go index 908c2497ed5b7..80095a5f6c03e 100644 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -44,7 +44,7 @@ func (lm lruMap) len() int { } func (lm lruMap) keys() []interface{} { - keys := make([]interface{}, len(lm.cacheKeys)) + keys := make([]interface{}, 0, len(lm.cacheKeys)) for k := range lm.cacheKeys { keys = append(keys, k) } diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go index b7d8aaf284778..b8fc1e495a9c9 100644 --- a/vendor/go.opencensus.io/trace/trace_go11.go +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.11 // +build go1.11 package trace diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go index e25419859c02a..da488fc874014 100644 --- a/vendor/go.opencensus.io/trace/trace_nongo11.go +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.11 // +build !go1.11 package trace diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS deleted file mode 100644 index 15167cd746c56..0000000000000 --- a/vendor/golang.org/x/net/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS deleted file mode 100644 index 1c4577e968061..0000000000000 --- a/vendor/golang.org/x/net/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. 
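Aside on the lrumap change above: it is a small correctness fix. The old code allocated the keys slice with a non-zero length and then appended to it, so every returned slice began with len(cacheKeys) nil entries. A standalone illustration of the two patterns, plain Go with nothing assumed from the vendored packages:

    package main

    import "fmt"

    func main() {
        src := map[string]int{"a": 1, "b": 2}

        // Old pattern: the slice already holds len(src) zero-valued entries,
        // and append grows it past them, doubling the length.
        buggy := make([]interface{}, len(src))
        for k := range src {
            buggy = append(buggy, k)
        }
        fmt.Println(len(buggy)) // 4: two nil entries followed by the two keys

        // Fixed pattern: length 0, capacity len(src), so append fills it exactly.
        fixed := make([]interface{}, 0, len(src))
        for k := range src {
            fixed = append(fixed, k)
        }
        fmt.Println(len(fixed)) // 2
    }
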
diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go index cf8947c33276b..0aa307c0611d1 100644 --- a/vendor/golang.org/x/net/bpf/vm_instructions.go +++ b/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -94,7 +94,7 @@ func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value u func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) { offset := int(ins.Off) - size := int(ins.Size) + size := ins.Size return loadCommon(in, offset, size) } @@ -121,7 +121,7 @@ func loadExtension(ins LoadExtension, in []byte) uint32 { func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { offset := int(ins.Off) + int(regX) - size := int(ins.Size) + size := ins.Size return loadCommon(in, offset, size) } diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go index 0a54bdbcc65d9..2cb9c408f2e78 100644 --- a/vendor/golang.org/x/net/context/go17.go +++ b/vendor/golang.org/x/net/context/go17.go @@ -32,7 +32,7 @@ var DeadlineExceeded = context.DeadlineExceeded // call cancel as soon as the operations running in this Context complete. func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { ctx, f := context.WithCancel(parent) - return ctx, CancelFunc(f) + return ctx, f } // WithDeadline returns a copy of the parent context with the deadline adjusted @@ -46,7 +46,7 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { // call cancel as soon as the operations running in this Context complete. func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { ctx, f := context.WithDeadline(parent, deadline) - return ctx, CancelFunc(f) + return ctx, f } // WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). diff --git a/vendor/golang.org/x/net/http/httpproxy/proxy.go b/vendor/golang.org/x/net/http/httpproxy/proxy.go index 16994ac1347b0..c3bd9a1eeb552 100644 --- a/vendor/golang.org/x/net/http/httpproxy/proxy.go +++ b/vendor/golang.org/x/net/http/httpproxy/proxy.go @@ -81,8 +81,7 @@ type config struct { // FromEnvironment returns a Config instance populated from the // environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the -// lowercase versions thereof). HTTPS_PROXY takes precedence over -// HTTP_PROXY for https requests. +// lowercase versions thereof). // // The environment values may be either a complete URL or a // "host[:port]", in which case the "http" scheme is assumed. An error diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 0178647ee0a3f..184ac45feb708 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -23,7 +23,7 @@ const frameHeaderLen = 9 var padZeros = make([]byte, 255) // zeros for padding // A FrameType is a registered frame type as defined in -// http://http2.github.io/http2-spec/#rfc.section.11.2 +// https://httpwg.org/specs/rfc7540.html#rfc.section.11.2 type FrameType uint8 const ( @@ -146,7 +146,7 @@ func typeFrameParser(t FrameType) frameParser { // A FrameHeader is the 9 byte header of all HTTP/2 frames. // -// See http://http2.github.io/http2-spec/#FrameHeader +// See https://httpwg.org/specs/rfc7540.html#FrameHeader type FrameHeader struct { valid bool // caller can access []byte fields in the Frame @@ -575,7 +575,7 @@ func (fr *Framer) checkFrameOrder(f Frame) error { // A DataFrame conveys arbitrary, variable-length sequences of octets // associated with a stream. 
-// See http://http2.github.io/http2-spec/#rfc.section.6.1 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.1 type DataFrame struct { FrameHeader data []byte @@ -698,7 +698,7 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by // endpoints communicate, such as preferences and constraints on peer // behavior. // -// See http://http2.github.io/http2-spec/#SETTINGS +// See https://httpwg.org/specs/rfc7540.html#SETTINGS type SettingsFrame struct { FrameHeader p []byte @@ -837,7 +837,7 @@ func (f *Framer) WriteSettingsAck() error { // A PingFrame is a mechanism for measuring a minimal round trip time // from the sender, as well as determining whether an idle connection // is still functional. -// See http://http2.github.io/http2-spec/#rfc.section.6.7 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.7 type PingFrame struct { FrameHeader Data [8]byte @@ -870,7 +870,7 @@ func (f *Framer) WritePing(ack bool, data [8]byte) error { } // A GoAwayFrame informs the remote peer to stop creating streams on this connection. -// See http://http2.github.io/http2-spec/#rfc.section.6.8 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.8 type GoAwayFrame struct { FrameHeader LastStreamID uint32 @@ -934,7 +934,7 @@ func parseUnknownFrame(_ *frameCache, fh FrameHeader, countError func(string), p } // A WindowUpdateFrame is used to implement flow control. -// See http://http2.github.io/http2-spec/#rfc.section.6.9 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.9 type WindowUpdateFrame struct { FrameHeader Increment uint32 // never read with high bit set @@ -1123,7 +1123,7 @@ func (f *Framer) WriteHeaders(p HeadersFrameParam) error { } // A PriorityFrame specifies the sender-advised priority of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.3 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.3 type PriorityFrame struct { FrameHeader PriorityParam @@ -1193,7 +1193,7 @@ func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { } // A RSTStreamFrame allows for abnormal termination of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.4 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.4 type RSTStreamFrame struct { FrameHeader ErrCode ErrCode @@ -1225,7 +1225,7 @@ func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { } // A ContinuationFrame is used to continue a sequence of header block fragments. -// See http://http2.github.io/http2-spec/#rfc.section.6.10 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.10 type ContinuationFrame struct { FrameHeader headerFragBuf []byte @@ -1266,7 +1266,7 @@ func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlock } // A PushPromiseFrame is used to initiate a server stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.6 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.6 type PushPromiseFrame struct { FrameHeader PromiseID uint32 diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go index 97f17831fc55b..6886dc163cba5 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -191,7 +191,7 @@ func appendTableSize(dst []byte, v uint32) []byte { // bit prefix, to dst and returns the extended buffer. 
// // See -// http://http2.github.io/http2-spec/compression.html#integer.representation +// https://httpwg.org/specs/rfc7541.html#integer.representation func appendVarInt(dst []byte, n byte, i uint64) []byte { k := uint64((1 << n) - 1) if i < k { diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index 85f18a2b0a861..ebdfbee964ae3 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -59,7 +59,7 @@ func (hf HeaderField) String() string { // Size returns the size of an entry per RFC 7541 section 4.1. func (hf HeaderField) Size() uint32 { - // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // https://httpwg.org/specs/rfc7541.html#rfc.section.4.1 // "The size of the dynamic table is the sum of the size of // its entries. The size of an entry is the sum of its name's // length in octets (as defined in Section 5.2), its value's @@ -158,7 +158,7 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { } type dynamicTable struct { - // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + // https://httpwg.org/specs/rfc7541.html#rfc.section.2.3.2 table headerFieldTable size uint32 // in bytes maxSize uint32 // current maxSize @@ -307,27 +307,27 @@ func (d *Decoder) parseHeaderFieldRepr() error { case b&128 != 0: // Indexed representation. // High bit set? - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.1 return d.parseFieldIndexed() case b&192 == 64: // 6.2.1 Literal Header Field with Incremental Indexing // 0b10xxxxxx: top two bits are 10 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.1 return d.parseFieldLiteral(6, indexedTrue) case b&240 == 0: // 6.2.2 Literal Header Field without Indexing // 0b0000xxxx: top four bits are 0000 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.2 return d.parseFieldLiteral(4, indexedFalse) case b&240 == 16: // 6.2.3 Literal Header Field never Indexed // 0b0001xxxx: top four bits are 0001 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.3 return d.parseFieldLiteral(4, indexedNever) case b&224 == 32: // 6.3 Dynamic Table Size Update // Top three bits are '001'. - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.3 return d.parseDynamicTableSizeUpdate() } @@ -420,7 +420,7 @@ var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} // readVarInt reads an unsigned variable length integer off the // beginning of p. n is the parameter as described in -// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. +// https://httpwg.org/specs/rfc7541.html#rfc.section.5.1. // // n must always be between 1 and 8. 
// diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 479ba4b2b11c9..6f2df281872ed 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -55,14 +55,14 @@ const ( ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" // SETTINGS_MAX_FRAME_SIZE default - // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + // https://httpwg.org/specs/rfc7540.html#rfc.section.6.5.2 initialMaxFrameSize = 16384 // NextProtoTLS is the NPN/ALPN protocol negotiated during // HTTP/2's TLS setup. NextProtoTLS = "h2" - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues initialHeaderTableSize = 4096 initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size @@ -111,7 +111,7 @@ func (st streamState) String() string { // Setting is a setting parameter: which setting it is, and its value. type Setting struct { // ID is which setting is being set. - // See http://http2.github.io/http2-spec/#SettingValues + // See https://httpwg.org/specs/rfc7540.html#SettingFormat ID SettingID // Val is the value. @@ -143,7 +143,7 @@ func (s Setting) Valid() error { } // A SettingID is an HTTP/2 setting as defined in -// http://http2.github.io/http2-spec/#iana-settings +// https://httpwg.org/specs/rfc7540.html#iana-settings type SettingID uint16 const ( diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 47524a61a5d6f..43cc2a34ad021 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -143,7 +143,7 @@ type Server struct { } func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection > initialWindowSize { + if s.MaxUploadBufferPerConnection >= initialWindowSize { return s.MaxUploadBufferPerConnection } return 1 << 20 @@ -869,9 +869,7 @@ func (sc *serverConn) serve() { // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. - if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { - sc.sendWindowUpdate(nil, int(diff)) - } + sc.sendWindowUpdate(nil) if err := sc.readPreface(); err != nil { sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) @@ -1371,6 +1369,9 @@ func (sc *serverConn) startGracefulShutdownInternal() { func (sc *serverConn) goAway(code ErrCode) { sc.serveG.check() if sc.inGoAway { + if sc.goAwayCode == ErrCodeNo { + sc.goAwayCode = code + } return } sc.inGoAway = true @@ -1585,7 +1586,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { if p := st.body; p != nil { // Return any buffered unread bytes worth of conn-level flow control. // See golang.org/issue/16481 - sc.sendWindowUpdate(nil, p.Len()) + sc.sendWindowUpdate(nil) p.CloseWithError(err) } @@ -1733,7 +1734,7 @@ func (sc *serverConn) processData(f *DataFrame) error { // sendWindowUpdate, which also schedules sending the // frames. sc.inflow.take(int32(f.Length)) - sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + sc.sendWindowUpdate(nil) // conn-level if st != nil && st.resetQueued { // Already have a stream error in flight. Don't send another. @@ -1747,6 +1748,12 @@ func (sc *serverConn) processData(f *DataFrame) error { // Sender sending more than they'd declared? 
if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + if sc.inflow.available() < int32(f.Length) { + return sc.countError("data_flow", streamError(id, ErrCodeFlowControl)) + } + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil) // conn-level + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the // value of a content-length header field does not equal the sum of the @@ -1763,7 +1770,7 @@ func (sc *serverConn) processData(f *DataFrame) error { if len(data) > 0 { wrote, err := st.body.Write(data) if err != nil { - sc.sendWindowUpdate(nil, int(f.Length)-wrote) + sc.sendWindowUpdate32(nil, int32(f.Length)-int32(wrote)) return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed)) } if wrote != len(data) { @@ -2090,12 +2097,6 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } - bodyOpen := !f.StreamEnded() - if rp.method == "HEAD" && bodyOpen { - // HEAD requests can't have bodies - return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol)) - } - rp.header = make(http.Header) for _, hf := range f.RegularFields() { rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) @@ -2108,6 +2109,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if err != nil { return nil, nil, err } + bodyOpen := !f.StreamEnded() if bodyOpen { if vv, ok := rp.header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { @@ -2223,6 +2225,9 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler didPanic := true defer func() { rw.rws.stream.cancelCtx() + if req.MultipartForm != nil { + req.MultipartForm.RemoveAll() + } if didPanic { e := recover() sc.writeFrameFromHandler(FrameWriteRequest{ @@ -2317,17 +2322,32 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { func (sc *serverConn) noteBodyRead(st *stream, n int) { sc.serveG.check() - sc.sendWindowUpdate(nil, n) // conn-level + sc.sendWindowUpdate(nil) // conn-level if st.state != stateHalfClosedRemote && st.state != stateClosed { // Don't send this WINDOW_UPDATE if the stream is closed // remotely. - sc.sendWindowUpdate(st, n) + sc.sendWindowUpdate(st) } } // st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate(st *stream, n int) { +func (sc *serverConn) sendWindowUpdate(st *stream) { sc.serveG.check() + + var n int32 + if st == nil { + if avail, windowSize := sc.inflow.available(), sc.srv.initialConnRecvWindowSize(); avail > windowSize/2 { + return + } else { + n = windowSize - avail + } + } else { + if avail, windowSize := st.inflow.available(), sc.srv.initialStreamRecvWindowSize(); avail > windowSize/2 { + return + } else { + n = windowSize - avail + } + } // "The legal range for the increment to the flow control // window is 1 to 2^31-1 (2,147,483,647) octets." 
// A Go Read call on 64-bit machines could in theory read @@ -2493,6 +2513,10 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { rws.writeHeader(200) } + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + isHeadResp := rws.req.Method == "HEAD" if !rws.sentHeader { rws.sentHeader = true @@ -2564,10 +2588,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { return 0, nil } - if rws.handlerDone { - rws.promoteUndeclaredTrailers() - } - // only send trailers if they have actually been defined by the // server handler. hasNonemptyTrailers := rws.hasNonemptyTrailers() diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 4ded4dfd56c06..c5d005bba7cc4 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -67,13 +67,23 @@ const ( // A Transport internally caches connections to servers. It is safe // for concurrent use by multiple goroutines. type Transport struct { - // DialTLS specifies an optional dial function for creating - // TLS connections for requests. + // DialTLSContext specifies an optional dial function with context for + // creating TLS connections for requests. // - // If DialTLS is nil, tls.Dial is used. + // If DialTLSContext and DialTLS is nil, tls.Dial is used. // // If the returned net.Conn has a ConnectionState method like tls.Conn, // it will be used to set http.Response.TLS. + DialTLSContext func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) + + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLSContext and DialTLS is nil, tls.Dial is used. + // + // Deprecated: Use DialTLSContext instead, which allows the transport + // to cancel dials as soon as they are no longer needed. + // If both are set, DialTLSContext takes priority. DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) // TLSClientConfig specifies the TLS configuration to use with @@ -248,7 +258,8 @@ func (t *Transport) initConnPool() { // HTTP/2 server. type ClientConn struct { t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls + tconn net.Conn // usually *tls.Conn, except specialized impls + tconnClosed bool tlsState *tls.ConnectionState // nil only for specialized impls reused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request @@ -334,8 +345,8 @@ type clientStream struct { readErr error // sticky read error; owned by transportResponseBody.Read reqBody io.ReadCloser - reqBodyContentLength int64 // -1 means unknown - reqBodyClosed bool // body has been closed; guarded by cc.mu + reqBodyContentLength int64 // -1 means unknown + reqBodyClosed chan struct{} // guarded by cc.mu; non-nil on Close, closed when done // owned by writeRequest: sentEndStream bool // sent an END_STREAM flag to the peer @@ -375,9 +386,8 @@ func (cs *clientStream) abortStreamLocked(err error) { cs.abortErr = err close(cs.abort) }) - if cs.reqBody != nil && !cs.reqBodyClosed { - cs.reqBody.Close() - cs.reqBodyClosed = true + if cs.reqBody != nil { + cs.closeReqBodyLocked() } // TODO(dneil): Clean up tests where cs.cc.cond is nil. 
if cs.cc.cond != nil { @@ -390,13 +400,24 @@ func (cs *clientStream) abortRequestBodyWrite() { cc := cs.cc cc.mu.Lock() defer cc.mu.Unlock() - if cs.reqBody != nil && !cs.reqBodyClosed { - cs.reqBody.Close() - cs.reqBodyClosed = true + if cs.reqBody != nil && cs.reqBodyClosed == nil { + cs.closeReqBodyLocked() cc.cond.Broadcast() } } +func (cs *clientStream) closeReqBodyLocked() { + if cs.reqBodyClosed != nil { + return + } + cs.reqBodyClosed = make(chan struct{}) + reqBodyClosed := cs.reqBodyClosed + go func() { + cs.reqBody.Close() + close(reqBodyClosed) + }() +} + type stickyErrWriter struct { conn net.Conn timeout time.Duration @@ -592,7 +613,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - tconn, err := t.dialTLS(ctx)("tcp", addr, t.newTLSConfig(host)) + tconn, err := t.dialTLS(ctx, "tcp", addr, t.newTLSConfig(host)) if err != nil { return nil, err } @@ -613,24 +634,25 @@ func (t *Transport) newTLSConfig(host string) *tls.Config { return cfg } -func (t *Transport) dialTLS(ctx context.Context) func(string, string, *tls.Config) (net.Conn, error) { - if t.DialTLS != nil { - return t.DialTLS +func (t *Transport) dialTLS(ctx context.Context, network, addr string, tlsCfg *tls.Config) (net.Conn, error) { + if t.DialTLSContext != nil { + return t.DialTLSContext(ctx, network, addr, tlsCfg) + } else if t.DialTLS != nil { + return t.DialTLS(network, addr, tlsCfg) } - return func(network, addr string, cfg *tls.Config) (net.Conn, error) { - tlsCn, err := t.dialTLSWithContext(ctx, network, addr, cfg) - if err != nil { - return nil, err - } - state := tlsCn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return tlsCn, nil + + tlsCn, err := t.dialTLSWithContext(ctx, network, addr, tlsCfg) + if err != nil { + return nil, err + } + state := tlsCn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return tlsCn, nil } // disableKeepAlives reports whether connections should be closed as @@ -910,10 +932,10 @@ func (cc *ClientConn) onIdleTimeout() { cc.closeIfIdle() } -func (cc *ClientConn) closeConn() error { +func (cc *ClientConn) closeConn() { t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) defer t.Stop() - return cc.tconn.Close() + cc.tconn.Close() } // A tls.Conn.Close can hang for a long time if the peer is unresponsive. @@ -979,7 +1001,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { shutdownEnterWaitStateHook() select { case <-done: - return cc.closeConn() + cc.closeConn() + return nil case <-ctx.Done(): cc.mu.Lock() // Free the goroutine above @@ -1016,7 +1039,7 @@ func (cc *ClientConn) sendGoAway() error { // closes the client connection immediately. In-flight requests are interrupted. // err is sent to streams. 
-func (cc *ClientConn) closeForError(err error) error { +func (cc *ClientConn) closeForError(err error) { cc.mu.Lock() cc.closed = true for _, cs := range cc.streams { @@ -1024,7 +1047,7 @@ func (cc *ClientConn) closeForError(err error) error { } cc.cond.Broadcast() cc.mu.Unlock() - return cc.closeConn() + cc.closeConn() } // Close closes the client connection immediately. @@ -1032,16 +1055,17 @@ func (cc *ClientConn) closeForError(err error) error { // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. func (cc *ClientConn) Close() error { err := errors.New("http2: client connection force closed via ClientConn.Close") - return cc.closeForError(err) + cc.closeForError(err) + return nil } // closes the client connection immediately. In-flight requests are interrupted. -func (cc *ClientConn) closeForLostPing() error { +func (cc *ClientConn) closeForLostPing() { err := errors.New("http2: client connection lost") if f := cc.t.CountError; f != nil { f("conn_close_lost_ping") } - return cc.closeForError(err) + cc.closeForError(err) } // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not @@ -1419,11 +1443,19 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // and in multiple cases: server replies <=299 and >299 // while still writing request body cc.mu.Lock() + mustCloseBody := false + if cs.reqBody != nil && cs.reqBodyClosed == nil { + mustCloseBody = true + cs.reqBodyClosed = make(chan struct{}) + } bodyClosed := cs.reqBodyClosed - cs.reqBodyClosed = true cc.mu.Unlock() - if !bodyClosed && cs.reqBody != nil { + if mustCloseBody { cs.reqBody.Close() + close(bodyClosed) + } + if bodyClosed != nil { + <-bodyClosed } if err != nil && cs.sentEndStream { @@ -1603,7 +1635,7 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { } if err != nil { cc.mu.Lock() - bodyClosed := cs.reqBodyClosed + bodyClosed := cs.reqBodyClosed != nil cc.mu.Unlock() switch { case bodyClosed: @@ -1698,7 +1730,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) if cc.closed { return 0, errClientConnClosed } - if cs.reqBodyClosed { + if cs.reqBodyClosed != nil { return 0, errStopReqBodyWrite } select { @@ -1994,7 +2026,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { // wake up RoundTrip if there is a pending request. 
cc.cond.Broadcast() - closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { if VerboseLogs { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2) @@ -2070,6 +2102,7 @@ func (rl *clientConnReadLoop) cleanup() { err = io.ErrUnexpectedEOF } cc.closed = true + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2663,7 +2696,6 @@ func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { if fn := cc.t.CountError; fn != nil { fn("recv_goaway_" + f.ErrCode.stringToken()) } - } cc.setGoAway(f) return nil @@ -3017,7 +3049,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Now().Sub(cc.lastActive) + ci.IdleTime = time.Since(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go index 0bfcf7afc6bfe..41883c530c80f 100644 --- a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -172,7 +172,23 @@ type mmsgTmpsPool struct { } func (p *mmsgTmpsPool) Get() *mmsgTmps { - return p.p.Get().(*mmsgTmps) + m := p.p.Get().(*mmsgTmps) + // Clear fields up to the len (not the cap) of the slice, + // assuming that the previous caller only used that many elements. + for i := range m.packer.sockaddrs { + m.packer.sockaddrs[i] = 0 + } + m.packer.sockaddrs = m.packer.sockaddrs[:0] + for i := range m.packer.vs { + m.packer.vs[i] = iovec{} + } + m.packer.vs = m.packer.vs[:0] + for i := range m.packer.hs { + m.packer.hs[i].Len = 0 + m.packer.hs[i].Hdr = msghdr{} + } + m.packer.hs = m.packer.hs[:0] + return m } func (p *mmsgTmpsPool) Put(tmps *mmsgTmps) { diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go index c3c7cc4c83ad7..5a38798cc0cd9 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_linux.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go @@ -17,9 +17,6 @@ func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { if sa != nil { h.Name = (*byte)(unsafe.Pointer(&sa[0])) h.Namelen = uint32(len(sa)) - } else { - h.Name = nil - h.Namelen = 0 } } diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go index ba53f564bb46b..f7d0b0d2b853d 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -8,22 +8,21 @@ package socket import ( + "net" "os" ) func (c *Conn) recvMsg(m *Message, flags int) error { m.raceWrite() - var h msghdr - vs := make([]iovec, len(m.Buffers)) - var sa []byte - if c.network != "tcp" { - sa = make([]byte, sizeofSockaddrInet6) - } - h.pack(vs, m.Buffers, m.OOB, sa) - var operr error - var n int + var ( + operr error + n int + oobn int + recvflags int + from net.Addr + ) fn := func(s uintptr) bool { - n, operr = recvmsg(s, &h, flags) + n, oobn, recvflags, from, operr = recvmsg(s, m.Buffers, m.OOB, flags, c.network) return ioComplete(flags, operr) } if err := c.c.Read(fn); err != nil { @@ -32,34 +31,21 @@ func (c *Conn) recvMsg(m *Message, flags int) error { if operr != nil { 
return os.NewSyscallError("recvmsg", operr) } - if c.network != "tcp" { - var err error - m.Addr, err = parseInetAddr(sa[:], c.network) - if err != nil { - return err - } - } + m.Addr = from m.N = n - m.NN = h.controllen() - m.Flags = h.flags() + m.NN = oobn + m.Flags = recvflags return nil } func (c *Conn) sendMsg(m *Message, flags int) error { m.raceRead() - var h msghdr - vs := make([]iovec, len(m.Buffers)) - var sa []byte - if m.Addr != nil { - var a [sizeofSockaddrInet6]byte - n := marshalInetAddr(m.Addr, a[:]) - sa = a[:n] - } - h.pack(vs, m.Buffers, m.OOB, sa) - var operr error - var n int + var ( + operr error + n int + ) fn := func(s uintptr) bool { - n, operr = sendmsg(s, &h, flags) + n, operr = sendmsg(s, m.Buffers, m.OOB, m.Addr, flags) return ioComplete(flags, operr) } if err := c.c.Write(fn); err != nil { diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go index 381e45e167f9b..7cfb349c0cd53 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_stub.go +++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -36,11 +36,11 @@ func setsockopt(s uintptr, level, name int, b []byte) error { return errNotImplemented } -func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { - return 0, errNotImplemented +func recvmsg(s uintptr, buffers [][]byte, oob []byte, flags int, network string) (n, oobn int, recvflags int, from net.Addr, err error) { + return 0, 0, 0, nil, errNotImplemented } -func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { +func sendmsg(s uintptr, buffers [][]byte, oob []byte, to net.Addr, flags int) (int, error) { return 0, errNotImplemented } diff --git a/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/golang.org/x/net/internal/socket/sys_unix.go index d203e2984cac8..de823932b9a71 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_unix.go +++ b/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -8,8 +8,10 @@ package socket import ( - "syscall" + "net" "unsafe" + + "golang.org/x/sys/unix" ) //go:linkname syscall_getsockopt syscall.getsockopt @@ -18,12 +20,6 @@ func syscall_getsockopt(s, level, name int, val unsafe.Pointer, vallen *uint32) //go:linkname syscall_setsockopt syscall.setsockopt func syscall_setsockopt(s, level, name int, val unsafe.Pointer, vallen uintptr) error -//go:linkname syscall_recvmsg syscall.recvmsg -func syscall_recvmsg(s int, msg *syscall.Msghdr, flags int) (int, error) - -//go:linkname syscall_sendmsg syscall.sendmsg -func syscall_sendmsg(s int, msg *syscall.Msghdr, flags int) (int, error) - func getsockopt(s uintptr, level, name int, b []byte) (int, error) { l := uint32(len(b)) err := syscall_getsockopt(int(s), level, name, unsafe.Pointer(&b[0]), &l) @@ -34,10 +30,93 @@ func setsockopt(s uintptr, level, name int, b []byte) error { return syscall_setsockopt(int(s), level, name, unsafe.Pointer(&b[0]), uintptr(len(b))) } -func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { - return syscall_recvmsg(int(s), (*syscall.Msghdr)(unsafe.Pointer(h)), flags) +func recvmsg(s uintptr, buffers [][]byte, oob []byte, flags int, network string) (n, oobn int, recvflags int, from net.Addr, err error) { + var unixFrom unix.Sockaddr + n, oobn, recvflags, unixFrom, err = unix.RecvmsgBuffers(int(s), buffers, oob, flags) + if unixFrom != nil { + from = sockaddrToAddr(unixFrom, network) + } + return } -func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { - return syscall_sendmsg(int(s), (*syscall.Msghdr)(unsafe.Pointer(h)), flags) +func sendmsg(s 
uintptr, buffers [][]byte, oob []byte, to net.Addr, flags int) (int, error) { + var unixTo unix.Sockaddr + if to != nil { + unixTo = addrToSockaddr(to) + } + return unix.SendmsgBuffers(int(s), buffers, oob, unixTo, flags) +} + +// addrToSockaddr converts a net.Addr to a unix.Sockaddr. +func addrToSockaddr(a net.Addr) unix.Sockaddr { + var ( + ip net.IP + port int + zone string + ) + switch a := a.(type) { + case *net.TCPAddr: + ip = a.IP + port = a.Port + zone = a.Zone + case *net.UDPAddr: + ip = a.IP + port = a.Port + zone = a.Zone + case *net.IPAddr: + ip = a.IP + zone = a.Zone + default: + return nil + } + + if ip4 := ip.To4(); ip4 != nil { + sa := unix.SockaddrInet4{Port: port} + copy(sa.Addr[:], ip4) + return &sa + } + + if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { + sa := unix.SockaddrInet6{Port: port} + copy(sa.Addr[:], ip6) + if zone != "" { + sa.ZoneId = uint32(zoneCache.index(zone)) + } + return &sa + } + + return nil +} + +// sockaddrToAddr converts a unix.Sockaddr to a net.Addr. +func sockaddrToAddr(sa unix.Sockaddr, network string) net.Addr { + var ( + ip net.IP + port int + zone string + ) + switch sa := sa.(type) { + case *unix.SockaddrInet4: + ip = make(net.IP, net.IPv4len) + copy(ip, sa.Addr[:]) + port = sa.Port + case *unix.SockaddrInet6: + ip = make(net.IP, net.IPv6len) + copy(ip, sa.Addr[:]) + port = sa.Port + if sa.ZoneId > 0 { + zone = zoneCache.name(int(sa.ZoneId)) + } + default: + return nil + } + + switch network { + case "tcp", "tcp4", "tcp6": + return &net.TCPAddr{IP: ip, Port: port, Zone: zone} + case "udp", "udp4", "udp6": + return &net.UDPAddr{IP: ip, Port: port, Zone: zone} + default: + return &net.IPAddr{IP: ip, Zone: zone} + } } diff --git a/vendor/golang.org/x/net/internal/socket/sys_windows.go b/vendor/golang.org/x/net/internal/socket/sys_windows.go index 2de0d68c619aa..b738b89ddd0a6 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_windows.go +++ b/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -5,6 +5,7 @@ package socket import ( + "net" "syscall" "unsafe" @@ -37,11 +38,11 @@ func setsockopt(s uintptr, level, name int, b []byte) error { return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b))) } -func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { - return 0, errNotImplemented +func recvmsg(s uintptr, buffers [][]byte, oob []byte, flags int, network string) (n, oobn int, recvflags int, from net.Addr, err error) { + return 0, 0, 0, nil, errNotImplemented } -func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { +func sendmsg(s uintptr, buffers [][]byte, oob []byte, to net.Addr, flags int) (int, error) { return 0, errNotImplemented } diff --git a/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go b/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go index 1e38b9223281f..eaa896cb57003 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go +++ b/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go @@ -5,6 +5,7 @@ package socket import ( + "net" "syscall" "unsafe" ) @@ -27,12 +28,39 @@ func setsockopt(s uintptr, level, name int, b []byte) error { return errnoErr(errno) } -func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { - n, _, errno := syscall_syscall(syscall.SYS___RECVMSG_A, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) - return int(n), errnoErr(errno) +func recvmsg(s uintptr, buffers [][]byte, oob []byte, flags int, network string) (n, oobn int, recvflags int, from net.Addr, err error) { + var h msghdr + vs := 
make([]iovec, len(buffers)) + var sa []byte + if network != "tcp" { + sa = make([]byte, sizeofSockaddrInet6) + } + h.pack(vs, buffers, oob, sa) + sn, _, errno := syscall_syscall(syscall.SYS___RECVMSG_A, s, uintptr(unsafe.Pointer(&h)), uintptr(flags)) + n = int(sn) + oobn = h.controllen() + recvflags = h.flags() + err = errnoErr(errno) + if network != "tcp" { + var err2 error + from, err2 = parseInetAddr(sa, network) + if err2 != nil && err == nil { + err = err2 + } + } + return } -func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { - n, _, errno := syscall_syscall(syscall.SYS___SENDMSG_A, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) +func sendmsg(s uintptr, buffers [][]byte, oob []byte, to net.Addr, flags int) (int, error) { + var h msghdr + vs := make([]iovec, len(buffers)) + var sa []byte + if to != nil { + var a [sizeofSockaddrInet6]byte + n := marshalInetAddr(to, a[:]) + sa = a[:n] + } + h.pack(vs, buffers, oob, sa) + n, _, errno := syscall_syscall(syscall.SYS___SENDMSG_A, s, uintptr(unsafe.Pointer(&h)), uintptr(flags)) return int(n), errnoErr(errno) } diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go deleted file mode 100644 index 5acf6db6ea560..0000000000000 --- a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs defs_darwin.go - -package socket - -type iovec struct { - Base *byte - Len uint32 -} - -type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen int32 - Control *byte - Controllen uint32 - Flags int32 -} - -type cmsghdr struct { - Len uint32 - Level int32 - Type int32 -} - -const ( - sizeofIovec = 0x8 - sizeofMsghdr = 0x1c -) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_riscv64.go similarity index 79% rename from vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go rename to vendor/golang.org/x/net/internal/socket/zsys_freebsd_riscv64.go index 5acf6db6ea560..965c0b28b5106 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_riscv64.go @@ -1,11 +1,11 @@ // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs defs_darwin.go +// cgo -godefs defs_freebsd.go package socket type iovec struct { Base *byte - Len uint32 + Len uint64 } type msghdr struct { @@ -25,6 +25,6 @@ type cmsghdr struct { } const ( - sizeofIovec = 0x8 - sizeofMsghdr = 0x1c + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 ) diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_riscv64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_riscv64.go new file mode 100644 index 0000000000000..0feb9a7536db6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_riscv64.go @@ -0,0 +1,52 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]uint8 + X__ss_align int64 + X__ss_pad2 [112]uint8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_riscv64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_riscv64.go new file mode 100644 index 0000000000000..5b39eb8dfd29b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_riscv64.go @@ -0,0 +1,64 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]uint8 + X__ss_align int64 + X__ss_pad2 [112]uint8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go index e2fddd6459975..7caeeaa696d47 100644 --- a/vendor/golang.org/x/net/publicsuffix/list.go +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -101,7 +101,7 @@ loop: break } - u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) + u := uint32(nodeValue(f) >> (nodesBitsTextOffset + nodesBitsTextLength)) icannNode = u&(1<>= nodesBitsICANN u = children[u&(1<>= nodesBitsTextLength offset := x & (1< 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { @@ -205,6 +219,8 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil } else if c.CredentialSource.URL != "" { return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil + } else if c.CredentialSource.Executable != nil { + return CreateExecutableCredential(ctx, c.CredentialSource.Executable, c) } return nil, fmt.Errorf("oauth2/google: unable to parse credential source") } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go 
b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go new file mode 100644 index 0000000000000..579bcce5f28be --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go @@ -0,0 +1,309 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" + "time" +) + +var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken") + +const ( + executableSupportedMaxVersion = 1 + defaultTimeout = 30 * time.Second + timeoutMinimum = 5 * time.Second + timeoutMaximum = 120 * time.Second + executableSource = "response" + outputFileSource = "output file" +) + +type nonCacheableError struct { + message string +} + +func (nce nonCacheableError) Error() string { + return nce.message +} + +func missingFieldError(source, field string) error { + return fmt.Errorf("oauth2/google: %v missing `%q` field", source, field) +} + +func jsonParsingError(source, data string) error { + return fmt.Errorf("oauth2/google: unable to parse %v\nResponse: %v", source, data) +} + +func malformedFailureError() error { + return nonCacheableError{"oauth2/google: response must include `error` and `message` fields when unsuccessful"} +} + +func userDefinedError(code, message string) error { + return nonCacheableError{fmt.Sprintf("oauth2/google: response contains unsuccessful response: (%v) %v", code, message)} +} + +func unsupportedVersionError(source string, version int) error { + return fmt.Errorf("oauth2/google: %v contains unsupported version: %v", source, version) +} + +func tokenExpiredError() error { + return nonCacheableError{"oauth2/google: the token returned by the executable is expired"} +} + +func tokenTypeError(source string) error { + return fmt.Errorf("oauth2/google: %v contains unsupported token type", source) +} + +func exitCodeError(exitCode int) error { + return fmt.Errorf("oauth2/google: executable command failed with exit code %v", exitCode) +} + +func executableError(err error) error { + return fmt.Errorf("oauth2/google: executable command failed: %v", err) +} + +func executablesDisallowedError() error { + return errors.New("oauth2/google: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") +} + +func timeoutRangeError() error { + return errors.New("oauth2/google: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") +} + +func commandMissingError() error { + return errors.New("oauth2/google: missing `command` field — executable command must be provided") +} + +type environment interface { + existingEnv() []string + getenv(string) string + run(ctx context.Context, command string, env []string) ([]byte, error) + now() time.Time +} + +type runtimeEnvironment struct{} + +func (r runtimeEnvironment) existingEnv() []string { + return os.Environ() +} + +func (r runtimeEnvironment) getenv(key string) string { + return os.Getenv(key) +} + +func (r runtimeEnvironment) now() time.Time { + return time.Now().UTC() +} + +func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { + splitCommand := strings.Fields(command) + cmd := exec.CommandContext(ctx, splitCommand[0], 
splitCommand[1:]...) + cmd.Env = env + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return nil, context.DeadlineExceeded + } + + if exitError, ok := err.(*exec.ExitError); ok { + return nil, exitCodeError(exitError.ExitCode()) + } + + return nil, executableError(err) + } + + bytesStdout := bytes.TrimSpace(stdout.Bytes()) + if len(bytesStdout) > 0 { + return bytesStdout, nil + } + return bytes.TrimSpace(stderr.Bytes()), nil +} + +type executableCredentialSource struct { + Command string + Timeout time.Duration + OutputFile string + ctx context.Context + config *Config + env environment +} + +// CreateExecutableCredential creates an executableCredentialSource given an ExecutableConfig. +// It also performs defaulting and type conversions. +func CreateExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { + if ec.Command == "" { + return executableCredentialSource{}, commandMissingError() + } + + result := executableCredentialSource{} + result.Command = ec.Command + if ec.TimeoutMillis == nil { + result.Timeout = defaultTimeout + } else { + result.Timeout = time.Duration(*ec.TimeoutMillis) * time.Millisecond + if result.Timeout < timeoutMinimum || result.Timeout > timeoutMaximum { + return executableCredentialSource{}, timeoutRangeError() + } + } + result.OutputFile = ec.OutputFile + result.ctx = ctx + result.config = config + result.env = runtimeEnvironment{} + return result, nil +} + +type executableResponse struct { + Version int `json:"version,omitempty"` + Success *bool `json:"success,omitempty"` + TokenType string `json:"token_type,omitempty"` + ExpirationTime int64 `json:"expiration_time,omitempty"` + IdToken string `json:"id_token,omitempty"` + SamlResponse string `json:"saml_response,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { + var result executableResponse + if err := json.Unmarshal(response, &result); err != nil { + return "", jsonParsingError(source, string(response)) + } + + if result.Version == 0 { + return "", missingFieldError(source, "version") + } + + if result.Success == nil { + return "", missingFieldError(source, "success") + } + + if !*result.Success { + if result.Code == "" || result.Message == "" { + return "", malformedFailureError() + } + return "", userDefinedError(result.Code, result.Message) + } + + if result.Version > executableSupportedMaxVersion || result.Version < 0 { + return "", unsupportedVersionError(source, result.Version) + } + + if result.ExpirationTime == 0 && cs.OutputFile != "" { + return "", missingFieldError(source, "expiration_time") + } + + if result.TokenType == "" { + return "", missingFieldError(source, "token_type") + } + + if result.ExpirationTime != 0 && result.ExpirationTime < now { + return "", tokenExpiredError() + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:jwt" || result.TokenType == "urn:ietf:params:oauth:token-type:id_token" { + if result.IdToken == "" { + return "", missingFieldError(source, "id_token") + } + return result.IdToken, nil + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:saml2" { + if result.SamlResponse == "" { + return "", missingFieldError(source, "saml_response") + } + return result.SamlResponse, nil + } + + return "", 
tokenTypeError(source) +} + +func (cs executableCredentialSource) subjectToken() (string, error) { + if token, err := cs.getTokenFromOutputFile(); token != "" || err != nil { + return token, err + } + + return cs.getTokenFromExecutableCommand() +} + +func (cs executableCredentialSource) getTokenFromOutputFile() (token string, err error) { + if cs.OutputFile == "" { + // This ExecutableCredentialSource doesn't use an OutputFile. + return "", nil + } + + file, err := os.Open(cs.OutputFile) + if err != nil { + // No OutputFile found. Hasn't been created yet, so skip it. + return "", nil + } + defer file.Close() + + data, err := ioutil.ReadAll(io.LimitReader(file, 1<<20)) + if err != nil || len(data) == 0 { + // Cachefile exists, but no data found. Get new credential. + return "", nil + } + + token, err = cs.parseSubjectTokenFromSource(data, outputFileSource, cs.env.now().Unix()) + if err != nil { + if _, ok := err.(nonCacheableError); ok { + // If the cached token is expired we need a new token, + // and if the cache contains a failure, we need to try again. + return "", nil + } + + // There was an error in the cached token, and the developer should be aware of it. + return "", err + } + // Token parsing succeeded. Use found token. + return token, nil +} + +func (cs executableCredentialSource) executableEnvironment() []string { + result := cs.env.existingEnv() + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", cs.config.Audience)) + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", cs.config.SubjectTokenType)) + result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") + if cs.config.ServiceAccountImpersonationURL != "" { + matches := serviceAccountImpersonationRE.FindStringSubmatch(cs.config.ServiceAccountImpersonationURL) + if matches != nil { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) + } + } + if cs.OutputFile != "" { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", cs.OutputFile)) + } + return result +} + +func (cs executableCredentialSource) getTokenFromExecutableCommand() (string, error) { + // For security reasons, we need our consumers to set this environment variable to allow executables to be run. + if cs.env.getenv("GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES") != "1" { + return "", executablesDisallowedError() + } + + ctx, cancel := context.WithDeadline(cs.ctx, cs.env.now().Add(cs.Timeout)) + defer cancel() + + output, err := cs.env.run(ctx, cs.Command, cs.executableEnvironment()) + if err != nil { + return "", err + } + return cs.parseSubjectTokenFromSource(output, executableSource, cs.env.now().Unix()) +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go index 8251fc85e0054..54c8f209f3b7d 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go @@ -48,12 +48,19 @@ type ImpersonateTokenSource struct { // Each service account must be granted roles/iam.serviceAccountTokenCreator // on the next service account in the chain. Optional. Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. + TokenLifetimeSeconds int } // Token performs the exchange to get a temporary service account token to allow access to GCP. 
func (its ImpersonateTokenSource) Token() (*oauth2.Token, error) { + lifetimeString := "3600s" + if its.TokenLifetimeSeconds != 0 { + lifetimeString = fmt.Sprintf("%ds", its.TokenLifetimeSeconds) + } reqBody := generateAccessTokenReq{ - Lifetime: "3600s", + Lifetime: lifetimeString, Scope: its.Scopes, Delegates: its.Delegates, } diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go index 67d97b9904699..e89e6ae17bca2 100644 --- a/vendor/golang.org/x/oauth2/google/jwt.go +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -66,7 +66,8 @@ func newJWTSource(jsonKey []byte, audience string, scopes []string) (oauth2.Toke if err != nil { return nil, err } - return oauth2.ReuseTokenSource(tok, ts), nil + rts := newErrWrappingTokenSource(oauth2.ReuseTokenSource(tok, ts)) + return rts, nil } type jwtAccessTokenSource struct { diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go index 683d2d271a308..95015648b43fe 100644 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -178,5 +178,5 @@ func Verify(token string, key *rsa.PublicKey) error { h := sha256.New() h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) } diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS deleted file mode 100644 index 15167cd746c56..0000000000000 --- a/vendor/golang.org/x/sync/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS deleted file mode 100644 index 1c4577e968061..0000000000000 --- a/vendor/golang.org/x/sync/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 4c0850a45aa14..cbee7a4e230d7 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -61,8 +61,8 @@ func (g *Group) Wait() error { // It blocks until the new goroutine can be added without the number of // active goroutines in the group exceeding the configured limit. // -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. +// The first call to return a non-nil error cancels the group's context, if the +// group was created by calling WithContext. The error will be returned by Wait. func (g *Group) Go(f func() error) { if g.sem != nil { g.sem <- token{} diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS deleted file mode 100644 index 15167cd746c56..0000000000000 --- a/vendor/golang.org/x/sys/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. 
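Note on the new executable credential source above: it shells out to a user-supplied command and parses a JSON document matching executableResponse from its stdout (or from the optional output file), and it refuses to run anything unless GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES=1 is set. A hedged sketch of what such a helper executable might look like; FAKE_SUBJECT_TOKEN is purely illustrative, and a real helper would obtain the token from its identity provider:

    package main

    import (
        "encoding/json"
        "os"
        "time"
    )

    // Emits the success shape expected by parseSubjectTokenFromSource:
    // version, success, token_type, expiration_time, and the token field
    // matching the declared token type (id_token for the jwt type).
    func main() {
        resp := map[string]interface{}{
            "version":         1,
            "success":         true,
            "token_type":      "urn:ietf:params:oauth:token-type:jwt",
            "expiration_time": time.Now().Add(time.Hour).Unix(),
            // Illustrative only: a real helper would fetch or mint this token itself.
            "id_token": os.Getenv("FAKE_SUBJECT_TOKEN"),
        }
        json.NewEncoder(os.Stdout).Encode(resp)
    }

The library passes context to the helper through environment variables set in executableEnvironment (GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE, GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE, GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE, and, when configured, GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL and GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE).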
diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS deleted file mode 100644 index 1c4577e968061..0000000000000 --- a/vendor/golang.org/x/sys/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go new file mode 100644 index 0000000000000..dd10eb79feefa --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && riscv64 +// +build !linux,riscv64 + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s new file mode 100644 index 0000000000000..d560019ea29e1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s @@ -0,0 +1,29 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd +// +build gc + +#include "textflag.h" + +// System call support for RISCV64 BSD + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go deleted file mode 100644 index 761db66efece2..0000000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. 
- -package unix - -const ( - DLT_HHDLC = 0x79 - IFF_SMART = 0x20 - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_IPXIP = 0xf9 - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 
0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_FAITH = 0x16 - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 - SIOCADDRT = 0x8030720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8030720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go deleted file mode 100644 index 070f44b651048..0000000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. 
- -package unix - -const ( - DLT_HHDLC = 0x79 - IFF_SMART = 0x20 - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_IPXIP = 0xf9 - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 
0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_FAITH = 0x16 - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 - SIOCADDRT = 0x8040720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8040720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go deleted file mode 100644 index 856dca3254386..0000000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package unix - -const ( - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP 
= 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - - // missing constants on FreeBSD-11.1-RELEASE, copied from old values in ztypes_freebsd_arm.go - IFF_SMART = 0x20 - IFT_FAITH = 0xf2 - IFT_IPXIP = 0xf9 - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IP_FAITH = 0x16 - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - SIOCADDRT = 0x8030720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8030720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go deleted file mode 100644 index 946dcf3fc7eca..0000000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. - -package unix - -const ( - DLT_HHDLC = 0x79 - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index ee73623489b07..dcef4de6f1863 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -89,25 +89,30 @@ dragonfly_amd64) freebsd_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_amd64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_arm) mkerrors="$mkerrors" mksyscall="go run mksyscall.go -l32 -arm" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. 
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; freebsd_arm64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +freebsd_riscv64) + mkerrors="$mkerrors -m64" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; netbsd_386) diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index d888fb770364d..2ab44aa659175 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -128,6 +128,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -202,6 +203,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -295,6 +297,10 @@ struct ltchars { #define SOL_NETLINK 270 #endif +#ifndef SOL_SMC +#define SOL_SMC 286 +#endif + #ifdef SOL_BLUETOOTH // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h // but it is already in bluetooth_linux.go @@ -529,7 +535,7 @@ ccflags="$@" $2 ~ /^(MS|MNT|MOUNT|UMOUNT)_/ || $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || - $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ || + $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|PIOD|TFD)_/ || $2 ~ /^KEXEC_/ || $2 ~ /^LINUX_REBOOT_CMD_/ || $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || @@ -553,6 +559,7 @@ ccflags="$@" $2 ~ /^CLONE_[A-Z_]+/ || $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && $2 ~ /^(BPF|DLT)_/ || + $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || $2 ~ /^CAN_/ || $2 ~ /^CAP_/ || @@ -575,7 +582,6 @@ ccflags="$@" $2 ~ /^SEEK_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || - $2 !~ /^AUDIT_RECORD_MAGIC/ && $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index ac579c60feb26..e2a30e88c653c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -218,13 +218,62 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { } func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { - // Recvmsg not implemented on AIX - return -1, -1, -1, ENOSYS + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(rsa)) + msg.Namelen = uint32(SizeofSockaddrAny) + var dummy byte + if len(oob) > 0 { + // receive at least one normal byte + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } + if n, err = recvmsg(fd, &msg, flags); n == -1 { + return + } + oobn = int(msg.Controllen) + recvflags = int(msg.Flags) + return } func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { - // SendmsgN not implemented on AIX - return -1, ENOSYS + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = uint32(salen) + var dummy byte + var empty bool + if len(oob) > 0 { + // send at least one normal byte + empty := emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + 
iova[0].SetLen(1) + iov = iova[:] + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } + if n, err = sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && empty { + n = 0 + } + return n, nil } func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 6f6c510f4130d..de7c23e0648ad 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -17,25 +17,12 @@ import ( "unsafe" ) -const ( - SYS_FSTAT_FREEBSD12 = 551 // { int fstat(int fd, _Out_ struct stat *sb); } - SYS_FSTATAT_FREEBSD12 = 552 // { int fstatat(int fd, _In_z_ char *path, \ - SYS_GETDIRENTRIES_FREEBSD12 = 554 // { ssize_t getdirentries(int fd, \ - SYS_STATFS_FREEBSD12 = 555 // { int statfs(_In_z_ char *path, \ - SYS_FSTATFS_FREEBSD12 = 556 // { int fstatfs(int fd, \ - SYS_GETFSSTAT_FREEBSD12 = 557 // { int getfsstat( \ - SYS_MKNODAT_FREEBSD12 = 559 // { int mknodat(int fd, _In_z_ char *path, \ -) - // See https://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/versions.html. var ( osreldateOnce sync.Once osreldate uint32 ) -// INO64_FIRST from /usr/src/lib/libc/sys/compat-ino64.h -const _ino64First = 1200031 - func supportsABI(ver uint32) bool { osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) return osreldate >= ver @@ -159,38 +146,18 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { var ( - _p0 unsafe.Pointer - bufsize uintptr - oldBuf []statfs_freebsd11_t - needsConvert bool + _p0 unsafe.Pointer + bufsize uintptr ) - if len(buf) > 0 { - if supportsABI(_ino64First) { - _p0 = unsafe.Pointer(&buf[0]) - bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) - } else { - n := len(buf) - oldBuf = make([]statfs_freebsd11_t, n) - _p0 = unsafe.Pointer(&oldBuf[0]) - bufsize = unsafe.Sizeof(statfs_freebsd11_t{}) * uintptr(n) - needsConvert = true - } + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - var sysno uintptr = SYS_GETFSSTAT - if supportsABI(_ino64First) { - sysno = SYS_GETFSSTAT_FREEBSD12 - } - r0, _, e1 := Syscall(sysno, uintptr(_p0), bufsize, uintptr(flags)) + r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) n = int(r0) if e1 != 0 { err = e1 } - if e1 == 0 && needsConvert { - for i := range oldBuf { - buf[i].convertFrom(&oldBuf[i]) - } - } return } @@ -245,87 +212,11 @@ func Uname(uname *Utsname) error { } func Stat(path string, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(AT_FDCWD, path, st, 0) - } - err = stat(path, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil + return Fstatat(AT_FDCWD, path, st, 0) } func Lstat(path string, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(AT_FDCWD, path, st, AT_SYMLINK_NOFOLLOW) - } - err = lstat(path, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Fstat(fd int, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstat_freebsd12(fd, st) - } - err = fstat(fd, &oldStat) - if err != nil { - return err - } - - 
st.convertFrom(&oldStat) - return nil -} - -func Fstatat(fd int, path string, st *Stat_t, flags int) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(fd, path, st, flags) - } - err = fstatat(fd, path, &oldStat, flags) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Statfs(path string, st *Statfs_t) (err error) { - var oldStatfs statfs_freebsd11_t - if supportsABI(_ino64First) { - return statfs_freebsd12(path, st) - } - err = statfs(path, &oldStatfs) - if err != nil { - return err - } - - st.convertFrom(&oldStatfs) - return nil -} - -func Fstatfs(fd int, st *Statfs_t) (err error) { - var oldStatfs statfs_freebsd11_t - if supportsABI(_ino64First) { - return fstatfs_freebsd12(fd, st) - } - err = fstatfs(fd, &oldStatfs) - if err != nil { - return err - } - - st.convertFrom(&oldStatfs) - return nil + return Fstatat(AT_FDCWD, path, st, AT_SYMLINK_NOFOLLOW) } func Getdents(fd int, buf []byte) (n int, err error) { @@ -333,162 +224,25 @@ func Getdents(fd int, buf []byte) (n int, err error) { } func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - if supportsABI(_ino64First) { - if basep == nil || unsafe.Sizeof(*basep) == 8 { - return getdirentries_freebsd12(fd, buf, (*uint64)(unsafe.Pointer(basep))) - } - // The freebsd12 syscall needs a 64-bit base. On 32-bit machines - // we can't just use the basep passed in. See #32498. - var base uint64 = uint64(*basep) - n, err = getdirentries_freebsd12(fd, buf, &base) - *basep = uintptr(base) - if base>>32 != 0 { - // We can't stuff the base back into a uintptr, so any - // future calls would be suspect. Generate an error. - // EIO is allowed by getdirentries. - err = EIO - } - return - } - - // The old syscall entries are smaller than the new. Use 1/4 of the original - // buffer size rounded up to DIRBLKSIZ (see /usr/src/lib/libc/sys/getdirentries.c). - oldBufLen := roundup(len(buf)/4, _dirblksiz) - oldBuf := make([]byte, oldBufLen) - n, err = getdirentries(fd, oldBuf, basep) - if err == nil && n > 0 { - n = convertFromDirents11(buf, oldBuf[:n]) + if basep == nil || unsafe.Sizeof(*basep) == 8 { + return getdirentries(fd, buf, (*uint64)(unsafe.Pointer(basep))) + } + // The syscall needs a 64-bit base. On 32-bit machines + // we can't just use the basep passed in. See #32498. + var base uint64 = uint64(*basep) + n, err = getdirentries(fd, buf, &base) + *basep = uintptr(base) + if base>>32 != 0 { + // We can't stuff the base back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO is allowed by getdirentries. + err = EIO } return } func Mknod(path string, mode uint32, dev uint64) (err error) { - var oldDev int - if supportsABI(_ino64First) { - return mknodat_freebsd12(AT_FDCWD, path, mode, dev) - } - oldDev = int(dev) - return mknod(path, mode, oldDev) -} - -func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { - var oldDev int - if supportsABI(_ino64First) { - return mknodat_freebsd12(fd, path, mode, dev) - } - oldDev = int(dev) - return mknodat(fd, path, mode, oldDev) -} - -// round x to the nearest multiple of y, larger or equal to x. -// -// from /usr/include/sys/param.h Macros for counting and rounding. 
-// #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) -func roundup(x, y int) int { - return ((x + y - 1) / y) * y -} - -func (s *Stat_t) convertFrom(old *stat_freebsd11_t) { - *s = Stat_t{ - Dev: uint64(old.Dev), - Ino: uint64(old.Ino), - Nlink: uint64(old.Nlink), - Mode: old.Mode, - Uid: old.Uid, - Gid: old.Gid, - Rdev: uint64(old.Rdev), - Atim: old.Atim, - Mtim: old.Mtim, - Ctim: old.Ctim, - Btim: old.Btim, - Size: old.Size, - Blocks: old.Blocks, - Blksize: old.Blksize, - Flags: old.Flags, - Gen: uint64(old.Gen), - } -} - -func (s *Statfs_t) convertFrom(old *statfs_freebsd11_t) { - *s = Statfs_t{ - Version: _statfsVersion, - Type: old.Type, - Flags: old.Flags, - Bsize: old.Bsize, - Iosize: old.Iosize, - Blocks: old.Blocks, - Bfree: old.Bfree, - Bavail: old.Bavail, - Files: old.Files, - Ffree: old.Ffree, - Syncwrites: old.Syncwrites, - Asyncwrites: old.Asyncwrites, - Syncreads: old.Syncreads, - Asyncreads: old.Asyncreads, - // Spare - Namemax: old.Namemax, - Owner: old.Owner, - Fsid: old.Fsid, - // Charspare - // Fstypename - // Mntfromname - // Mntonname - } - - sl := old.Fstypename[:] - n := clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Fstypename[:], old.Fstypename[:n]) - - sl = old.Mntfromname[:] - n = clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Mntfromname[:], old.Mntfromname[:n]) - - sl = old.Mntonname[:] - n = clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Mntonname[:], old.Mntonname[:n]) -} - -func convertFromDirents11(buf []byte, old []byte) int { - const ( - fixedSize = int(unsafe.Offsetof(Dirent{}.Name)) - oldFixedSize = int(unsafe.Offsetof(dirent_freebsd11{}.Name)) - ) - - dstPos := 0 - srcPos := 0 - for dstPos+fixedSize < len(buf) && srcPos+oldFixedSize < len(old) { - var dstDirent Dirent - var srcDirent dirent_freebsd11 - - // If multiple direntries are written, sometimes when we reach the final one, - // we may have cap of old less than size of dirent_freebsd11. 
- copy((*[unsafe.Sizeof(srcDirent)]byte)(unsafe.Pointer(&srcDirent))[:], old[srcPos:]) - - reclen := roundup(fixedSize+int(srcDirent.Namlen)+1, 8) - if dstPos+reclen > len(buf) { - break - } - - dstDirent.Fileno = uint64(srcDirent.Fileno) - dstDirent.Off = 0 - dstDirent.Reclen = uint16(reclen) - dstDirent.Type = srcDirent.Type - dstDirent.Pad0 = 0 - dstDirent.Namlen = uint16(srcDirent.Namlen) - dstDirent.Pad1 = 0 - - copy(dstDirent.Name[:], srcDirent.Name[:srcDirent.Namlen]) - copy(buf[dstPos:], (*[unsafe.Sizeof(dstDirent)]byte)(unsafe.Pointer(&dstDirent))[:]) - padding := buf[dstPos+fixedSize+int(dstDirent.Namlen) : dstPos+reclen] - for i := range padding { - padding[i] = 0 - } - - dstPos += int(dstDirent.Reclen) - srcPos += int(srcDirent.Reclen) - } - - return dstPos + return Mknodat(AT_FDCWD, path, mode, dev) } func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { @@ -501,31 +255,31 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ptrace(request int, pid int, addr uintptr, data int) (err error) func PtraceAttach(pid int) (err error) { - return ptrace(PTRACE_ATTACH, pid, 0, 0) + return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceCont(pid int, signal int) (err error) { - return ptrace(PTRACE_CONT, pid, 1, signal) + return ptrace(PT_CONTINUE, pid, 1, signal) } func PtraceDetach(pid int) (err error) { - return ptrace(PTRACE_DETACH, pid, 1, 0) + return ptrace(PT_DETACH, pid, 1, 0) } func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { - return ptrace(PTRACE_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) + return ptrace(PT_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) } func PtraceGetRegs(pid int, regsout *Reg) (err error) { - return ptrace(PTRACE_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) + return ptrace(PT_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) } func PtraceLwpEvents(pid int, enable int) (err error) { - return ptrace(PTRACE_LWPEVENTS, pid, 0, enable) + return ptrace(PT_LWP_EVENTS, pid, 0, enable) } func PtraceLwpInfo(pid int, info uintptr) (err error) { - return ptrace(PTRACE_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) + return ptrace(PT_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) } func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { @@ -545,11 +299,11 @@ func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceSetRegs(pid int, regs *Reg) (err error) { - return ptrace(PTRACE_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) + return ptrace(PT_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) } func PtraceSingleStep(pid int) (err error) { - return ptrace(PTRACE_SINGLESTEP, pid, 1, 0) + return ptrace(PT_STEP, pid, 1, 0) } /* @@ -591,16 +345,12 @@ func PtraceSingleStep(pid int) (err error) { //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) -//sys fstat(fd int, stat *stat_freebsd11_t) (err error) -//sys fstat_freebsd12(fd int, stat *Stat_t) (err error) -//sys fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) -//sys fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) -//sys fstatfs(fd int, stat *statfs_freebsd11_t) (err error) -//sys fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) 
+//sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) -//sys getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) -//sys getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) +//sys getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) @@ -622,13 +372,10 @@ func PtraceSingleStep(pid int) (err error) { //sys Link(path string, link string) (err error) //sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) -//sys lstat(path string, stat *stat_freebsd11_t) (err error) //sys Mkdir(path string, mode uint32) (err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) -//sys mknod(path string, mode uint32, dev int) (err error) -//sys mknodat(fd int, path string, mode uint32, dev int) (err error) -//sys mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) +//sys Mknodat(fd int, path string, mode uint32, dev uint64) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) @@ -658,9 +405,7 @@ func PtraceSingleStep(pid int) (err error) { //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) -//sys stat(path string, stat *stat_freebsd11_t) (err error) -//sys statfs(path string, stat *statfs_freebsd11_t) (err error) -//sys statfs_freebsd12(path string, stat *Statfs_t) (err error) +//sys Statfs(path string, stat *Statfs_t) (err error) //sys Symlink(path string, link string) (err error) //sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 342fc32b1686a..c3c4c698e0720 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -57,11 +57,11 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) + return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) } func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index a32d5aa4aed44..82be61a2f98b1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -57,11 +57,11 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 
uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) + return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) } func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 1e36d39abe018..cd58f1026c057 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -58,6 +58,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index a09a1537bd6f3..d6f538f9e0077 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -58,6 +58,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go new file mode 100644 index 0000000000000..8ea6e96100ace --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -0,0 +1,63 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 440900112cd42..f8c2c5138748b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80084267 BIOCSETFNR = 0x80084282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8008427b BIOCSETZBUF = 0x800c4281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1179,6 +1185,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + 
O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1189,6 +1197,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1196,6 +1208,60 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETFSBASE = 0x47 + PT_GETGSBASE = 0x49 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETXMMREGS = 0x40 + PT_GETXSTATE = 0x45 + PT_GETXSTATE_INFO = 0x44 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETFSBASE = 0x48 + PT_SETGSBASE = 0x4a + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SETXMMREGS = 0x41 + PT_SETXSTATE = 0x46 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1320,10 +1386,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0086924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1414,6 +1482,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1472,22 +1541,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1496,12 +1583,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 
0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1541,6 +1634,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1554,7 +1648,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1694,12 +1787,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1842,7 +1936,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1904,6 +1998,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 64520d31226b9..96310c3be1b0a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80104267 BIOCSETFNR = 0x80104282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8010427b BIOCSETZBUF = 0x80184281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const 
( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1180,6 +1186,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1190,6 +1198,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1197,6 +1209,58 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETFSBASE = 0x47 + PT_GETGSBASE = 0x49 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETXSTATE = 0x45 + PT_GETXSTATE_INFO = 0x44 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETFSBASE = 0x48 + PT_SETGSBASE = 0x4a + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SETXSTATE = 0x46 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1321,10 +1385,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0106924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1415,6 +1481,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1473,22 +1540,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1497,12 +1582,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d 
TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1542,6 +1633,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1555,7 +1647,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1693,12 +1784,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1841,7 +1933,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1903,6 +1995,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 99e9a0e06e95f..777b69defa04d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80084267 BIOCSETFNR = 0x80084282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8008427b BIOCSETZBUF = 0x800c4281 BIOCSHDRCMPLT = 0x80044275 @@ -362,7 +363,7 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0x18 CTL_NET = 0x4 - DIOCGATTR = 0xc144648e + DIOCGATTR = 0xc148648e DIOCGDELETE = 0x80106488 DIOCGFLUSH = 0x20006487 DIOCGFRONTSTUFF = 0x40086486 @@ -377,7 +378,7 @@ const ( DIOCGSTRIPESIZE = 0x4008648b DIOCSKERNELDUMP = 0x804c6490 DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 - DIOCZONECMD = 0xc06c648f + DIOCZONECMD = 0xc078648f DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 @@ -407,7 +408,9 @@ const ( DLT_C_HDLC_WITH_DIR = 0xcd DLT_DBUS = 0xe7 DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 DLT_DVB_CI = 0xeb DLT_ECONET = 0x73 DLT_EN10MB = 0x1 @@ -417,6 +420,7 @@ const ( DLT_ERF = 0xc5 DLT_ERF_ETH = 0xaf DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 DLT_FC_2 = 0xe0 DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa @@ -444,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -484,9 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 
DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x109 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -502,7 +508,9 @@ const ( DLT_NFC_LLCP = 0xf5 DLT_NFLOG = 0xef DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x79 @@ -526,15 +534,18 @@ const ( DLT_RTAC_SERIAL = 0xfa DLT_SCCP = 0x8e DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c DLT_SITA = 0xc4 DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xd DLT_STANAG_5066_D_PDU = 0xed DLT_SUNATM = 0x7b DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d DLT_TZSP = 0x80 DLT_USB = 0xba DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a DLT_USB_FREEBSD = 0xba DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc @@ -554,6 +565,7 @@ const ( DLT_USER7 = 0x9a DLT_USER8 = 0x9b DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f DLT_WATTSTOPPER_DLM = 0x107 DLT_WIHART = 0xdf DLT_WIRESHARK_UPPER_PDU = 0xfc @@ -578,6 +590,7 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd EVFILT_FS = -0x9 EVFILT_LIO = -0xa EVFILT_PROC = -0x5 @@ -585,11 +598,12 @@ const ( EVFILT_READ = -0x1 EVFILT_SENDFILE = -0xc EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc + EVFILT_SYSCOUNT = 0xd EVFILT_TIMER = -0x7 EVFILT_USER = -0xb EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 @@ -606,6 +620,7 @@ const ( EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff EXTATTR_NAMESPACE_EMPTY = 0x0 EXTATTR_NAMESPACE_SYSTEM = 0x2 EXTATTR_NAMESPACE_USER = 0x1 @@ -647,6 +662,7 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 IFF_ALLMULTI = 0x200 IFF_ALTPHYS = 0x4000 IFF_BROADCAST = 0x2 @@ -663,6 +679,7 @@ const ( IFF_MONITOR = 0x40000 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PPROMISC = 0x20000 @@ -719,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -799,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -837,6 +854,7 @@ const ( IPV6_DSTOPTS = 0x32 IPV6_FLOWID = 0x43 IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWTYPE = 0x44 IPV6_FRAGTTL = 0x78 @@ -857,13 +875,13 @@ const ( IPV6_MAX_GROUP_SRC_FILTER = 0x200 IPV6_MAX_MEMBERSHIPS = 0xfff IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 IPV6_PATHMTU = 0x2c IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe @@ -875,6 +893,7 @@ const ( IPV6_RECVFLOWID = 0x46 IPV6_RECVHOPLIMIT = 0x25 IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 IPV6_RECVPATHMTU = 0x2b IPV6_RECVPKTINFO = 0x24 IPV6_RECVRSSBUCKETID = 0x47 @@ -894,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -935,10 +955,8 @@ const ( IP_MAX_MEMBERSHIPS = 0xfff IP_MAX_SOCK_MUTE_FILTER = 0x80 IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MAX_SOURCE_FILTER = 0x400 IP_MF = 0x2000 IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f 
IP_MSFILTER = 0x4a IP_MSS = 0x240 IP_MULTICAST_IF = 0x9 @@ -948,6 +966,7 @@ const ( IP_OFFMASK = 0x1fff IP_ONESBCAST = 0x17 IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b IP_PORTRANGE = 0x13 IP_PORTRANGE_DEFAULT = 0x0 IP_PORTRANGE_HIGH = 0x1 @@ -956,6 +975,7 @@ const ( IP_RECVFLOWID = 0x5d IP_RECVIF = 0x14 IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b IP_RECVRETOPTS = 0x6 IP_RECVRSSBUCKETID = 0x5e IP_RECVTOS = 0x44 @@ -972,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -983,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1071,10 +1094,12 @@ const ( MNT_SUSPEND = 0x4 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 MNT_UPDATE = 0x10000 - MNT_UPDATEMASK = 0x2d8d0807e + MNT_UPDATEMASK = 0xad8d0807e MNT_USER = 0x8000 - MNT_VISFLAGMASK = 0x3fef0ffff + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 @@ -1103,6 +1128,7 @@ const ( NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 NOTE_CLOSE = 0x100 @@ -1159,6 +1185,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1169,6 +1197,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1176,6 +1208,53 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETVFPREGS = 0x40 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SETVFPREGS = 0x41 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1257,7 +1336,6 @@ const ( RTV_WEIGHT = 0x100 RT_ALL_FIBS = -0x1 RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 RT_DEFAULT_FIB = 0x0 RT_HAS_GW = 0x80 RT_HAS_HEADER = 0x10 @@ -1267,15 +1345,17 @@ const ( RT_LLE_CACHE = 0x100 RT_MAY_LOOP = 0x8 RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 RT_REJECT = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_BINTIME = 0x4 SCM_CREDS = 0x3 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 SEEK_CUR = 0x1 SEEK_DATA = 0x3 SEEK_END = 0x2 @@ -1299,10 +1379,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 
0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0086924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1318,8 +1400,11 @@ const ( SIOCGIFPDSTADDR = 0xc0206948 SIOCGIFPHYS = 0xc0206935 SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 SIOCGIFSTATUS = 0xc331693b SIOCGIFXMEDIA = 0xc028698b + SIOCGLANPCP = 0xc0206998 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCGPRIVATE_0 = 0xc0206950 @@ -1350,6 +1435,7 @@ const ( SIOCSIFPHYS = 0x80206936 SIOCSIFRVNET = 0xc020695b SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 SIOCSTUNFIB = 0x8020695f @@ -1369,6 +1455,7 @@ const ( SO_BINTIME = 0x2000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1377,6 +1464,7 @@ const ( SO_LISTENINCQLEN = 0x1013 SO_LISTENQLEN = 0x1012 SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 SO_NOSIGPIPE = 0x800 SO_NO_DDP = 0x8000 SO_NO_OFFLOAD = 0x4000 @@ -1387,13 +1475,22 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 SO_SETFIB = 0x1014 SO_SNDBUF = 0x1001 SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 @@ -1437,10 +1534,69 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DELACK = 0x48 TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 TCP_FUNCTION_BLK = 0x2000 TCP_FUNCTION_NAME_LEN_MAX = 0x20 TCP_INFO = 
0x20 @@ -1448,6 +1604,12 @@ const ( TCP_KEEPIDLE = 0x100 TCP_KEEPINIT = 0x80 TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOG_ID_LEN = 0x40 TCP_MAXBURST = 0x4 TCP_MAXHLEN = 0x3c TCP_MAXOLEN = 0x28 @@ -1463,8 +1625,30 @@ const ( TCP_NOPUSH = 0x4 TCP_PCAP_IN = 0x1000 TCP_PCAP_OUT = 0x800 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 TCP_VENDOR = 0x80000000 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCONS = 0x80047462 @@ -1528,6 +1712,8 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1592,12 +1778,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1740,7 +1927,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1802,6 +1989,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 4c837711493ff..c557ac2db317a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80104267 BIOCSETFNR = 0x80104282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8010427b BIOCSETZBUF = 0x80184281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 
IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1180,6 +1186,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1190,6 +1198,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1197,6 +1209,51 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1321,10 +1378,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0106924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1415,6 +1474,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1473,22 +1533,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 
TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1497,12 +1575,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1542,6 +1626,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1555,7 +1640,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1694,12 +1778,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1842,7 +1927,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1904,6 +1989,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go new file mode 100644 index 0000000000000..341b4d96265b1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -0,0 +1,2148 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_HYPERV = 0x2b + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2b + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 + AF_VENDOR32 = 0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + ALTWERASE = 0x200 + B0 = 0x0 + B1000000 = 0xf4240 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1500000 = 0x16e360 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B2000000 = 0x1e8480 + B230400 = 0x38400 + B2400 = 0x960 + B2500000 = 0x2625a0 + B28800 = 0x7080 + B300 = 0x12c + B3000000 = 0x2dc6c0 + B3500000 = 0x3567e0 + B38400 = 0x9600 + B4000000 = 0x3d0900 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B500000 = 0x7a120 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0104279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4008427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x40184280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x80104282 + BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 + BIOCSETWF = 0x8010427b + BIOCSETZBUF = 0x80184281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x8 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 
+ BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + CAP_ACCEPT = 0x200000020000000 + CAP_ACL_CHECK = 0x400000000010000 + CAP_ACL_DELETE = 0x400000000020000 + CAP_ACL_GET = 0x400000000040000 + CAP_ACL_SET = 0x400000000080000 + CAP_ALL0 = 0x20007ffffffffff + CAP_ALL1 = 0x4000000001fffff + CAP_BIND = 0x200000040000000 + CAP_BINDAT = 0x200008000000400 + CAP_CHFLAGSAT = 0x200000000001400 + CAP_CONNECT = 0x200000080000000 + CAP_CONNECTAT = 0x200010000000400 + CAP_CREATE = 0x200000000000040 + CAP_EVENT = 0x400000000000020 + CAP_EXTATTR_DELETE = 0x400000000001000 + CAP_EXTATTR_GET = 0x400000000002000 + CAP_EXTATTR_LIST = 0x400000000004000 + CAP_EXTATTR_SET = 0x400000000008000 + CAP_FCHDIR = 0x200000000000800 + CAP_FCHFLAGS = 0x200000000001000 + CAP_FCHMOD = 0x200000000002000 + CAP_FCHMODAT = 0x200000000002400 + CAP_FCHOWN = 0x200000000004000 + CAP_FCHOWNAT = 0x200000000004400 + CAP_FCNTL = 0x200000000008000 + CAP_FCNTL_ALL = 0x78 + CAP_FCNTL_GETFL = 0x8 + CAP_FCNTL_GETOWN = 0x20 + CAP_FCNTL_SETFL = 0x10 + CAP_FCNTL_SETOWN = 0x40 + CAP_FEXECVE = 0x200000000000080 + CAP_FLOCK = 0x200000000010000 + CAP_FPATHCONF = 0x200000000020000 + CAP_FSCK = 0x200000000040000 + CAP_FSTAT = 0x200000000080000 + CAP_FSTATAT = 0x200000000080400 + CAP_FSTATFS = 0x200000000100000 + CAP_FSYNC = 0x200000000000100 + CAP_FTRUNCATE = 0x200000000000200 + CAP_FUTIMES = 0x200000000200000 + CAP_FUTIMESAT = 0x200000000200400 + CAP_GETPEERNAME = 0x200000100000000 + CAP_GETSOCKNAME = 0x200000200000000 + CAP_GETSOCKOPT = 0x200000400000000 + CAP_IOCTL = 0x400000000000080 + CAP_IOCTLS_ALL = 0x7fffffffffffffff + CAP_KQUEUE = 0x400000000100040 + CAP_KQUEUE_CHANGE = 0x400000000100000 + CAP_KQUEUE_EVENT = 0x400000000000040 + CAP_LINKAT_SOURCE = 0x200020000000400 + CAP_LINKAT_TARGET = 0x200000000400400 + CAP_LISTEN = 0x200000800000000 + CAP_LOOKUP = 0x200000000000400 + CAP_MAC_GET = 0x400000000000001 + CAP_MAC_SET = 0x400000000000002 + CAP_MKDIRAT = 0x200000000800400 + CAP_MKFIFOAT = 0x200000001000400 + CAP_MKNODAT = 0x200000002000400 + CAP_MMAP = 0x200000000000010 + CAP_MMAP_R = 0x20000000000001d + CAP_MMAP_RW = 0x20000000000001f + CAP_MMAP_RWX = 0x20000000000003f + CAP_MMAP_RX = 0x20000000000003d + CAP_MMAP_W = 0x20000000000001e + CAP_MMAP_WX = 0x20000000000003e + CAP_MMAP_X = 0x20000000000003c + CAP_PDGETPID = 0x400000000000200 + CAP_PDKILL = 0x400000000000800 + CAP_PDWAIT = 0x400000000000400 + CAP_PEELOFF = 
0x200001000000000 + CAP_POLL_EVENT = 0x400000000000020 + CAP_PREAD = 0x20000000000000d + CAP_PWRITE = 0x20000000000000e + CAP_READ = 0x200000000000001 + CAP_RECV = 0x200000000000001 + CAP_RENAMEAT_SOURCE = 0x200000004000400 + CAP_RENAMEAT_TARGET = 0x200040000000400 + CAP_RIGHTS_VERSION = 0x0 + CAP_RIGHTS_VERSION_00 = 0x0 + CAP_SEEK = 0x20000000000000c + CAP_SEEK_TELL = 0x200000000000004 + CAP_SEM_GETVALUE = 0x400000000000004 + CAP_SEM_POST = 0x400000000000008 + CAP_SEM_WAIT = 0x400000000000010 + CAP_SEND = 0x200000000000002 + CAP_SETSOCKOPT = 0x200002000000000 + CAP_SHUTDOWN = 0x200004000000000 + CAP_SOCK_CLIENT = 0x200007780000003 + CAP_SOCK_SERVER = 0x200007f60000003 + CAP_SYMLINKAT = 0x200000008000400 + CAP_TTYHOOK = 0x400000000000100 + CAP_UNLINKAT = 0x200000010000400 + CAP_UNUSED0_44 = 0x200080000000000 + CAP_UNUSED0_57 = 0x300000000000000 + CAP_UNUSED1_22 = 0x400000000200000 + CAP_UNUSED1_57 = 0x500000000000000 + CAP_WRITE = 0x200000000000002 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x5 + CLOCK_MONOTONIC = 0x4 + CLOCK_MONOTONIC_COARSE = 0xc + CLOCK_MONOTONIC_FAST = 0xc + CLOCK_MONOTONIC_PRECISE = 0xb + CLOCK_PROCESS_CPUTIME_ID = 0xf + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_COARSE = 0xa + CLOCK_REALTIME_FAST = 0xa + CLOCK_REALTIME_PRECISE = 0x9 + CLOCK_SECOND = 0xd + CLOCK_THREAD_CPUTIME_ID = 0xe + CLOCK_UPTIME = 0x5 + CLOCK_UPTIME_FAST = 0x8 + CLOCK_UPTIME_PRECISE = 0x7 + CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DIOCGATTR = 0xc148648e + DIOCGDELETE = 0x80106488 + DIOCGFLUSH = 0x20006487 + DIOCGFWHEADS = 0x40046483 + DIOCGFWSECTORS = 0x40046482 + DIOCGIDENT = 0x41006489 + DIOCGKERNELDUMP = 0xc0986492 + DIOCGMEDIASIZE = 0x40086481 + DIOCGPHYSPATH = 0x4400648d + DIOCGPROVIDERNAME = 0x4400648a + DIOCGSECTORSIZE = 0x40046480 + DIOCGSTRIPEOFFSET = 0x4008648c + DIOCGSTRIPESIZE = 0x4008648b + DIOCSKERNELDUMP = 0x80986491 + DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 + DIOCSKERNELDUMP_FREEBSD12 = 0x80506490 + DIOCZONECMD = 0xc080648f + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_BREDR_BB = 0xff + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_BLUETOOTH_LE_LL = 0xfb + DLT_BLUETOOTH_LE_LL_WITH_PHDR = 0x100 + DLT_BLUETOOTH_LINUX_MONITOR = 0xfe + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_CLASS_NETBSD_RAWAF = 0x2240000 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 + DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_EPON = 0x103 + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + 
DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_INFINIBAND = 0xf7 + DLT_IPFILTER = 0x74 + DLT_IPMB_KONTRON = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPMI_HPM_2 = 0x104 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 0x108 + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 + DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x114 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NETLINK = 0xfd + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x79 + DLT_PKTAP = 0x102 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PROFIBUS_DL = 0x101 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RDS = 0x109 + DLT_REDBACK_SMARTEDGE = 0x20 + DLT_RIO = 0x7c + DLT_RTAC_SERIAL = 0xfa + DLT_SCCP = 0x8e + DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a + DLT_USB_FREEBSD = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f + DLT_WATTSTOPPER_DLM = 0x107 + DLT_WIHART = 0xdf + DLT_WIRESHARK_UPPER_PDU = 0xfc + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + 
ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EHE_DEAD_PRIORITY = -0x1 + EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_PROCDESC = -0x8 + EVFILT_READ = -0x1 + EVFILT_SENDFILE = -0xc + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xd + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_FLAG2 = 0x4000 + EV_FORCEONESHOT = 0x100 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_NONE = -0xc8 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_ADD_SEALS = 0x13 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_GET_SEALS = 0x14 + F_ISUNIONSTACK = 0x15 + F_KINFO = 0x16 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f72 + IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_KNOWSEPOCH = 0x20 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_IEEE1394 = 0x90 + IFT_INFINIBAND = 0xc7 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_PPP = 0x17 + IFT_PROPVIRTUAL = 0x35 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_NETMASK_DEFAULT = 0xffffff00 + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + 
IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDMULTI = 0x41 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FLOWID = 0x43 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOWTYPE = 0x44 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVFLOWID = 0x46 + IPV6_RECVHOPLIMIT 
= 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRSSBUCKETID = 0x47 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RSSBUCKETID = 0x45 + IPV6_RSS_LISTEN_BUCKET = 0x42 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BINDMULTI = 0x19 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FLOWID = 0x5a + IP_FLOWTYPE = 0x5b + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVFLOWID = 0x5d + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b + IP_RECVRETOPTS = 0x6 + IP_RECVRSSBUCKETID = 0x5e + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSSBUCKETID = 0x5c + IP_RSS_LISTEN_BUCKET = 0x1a + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b + ISIG = 0x80 + ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_32BIT = 0x80000 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GUARD = 0x2000 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RESERVED0020 = 0x20 + MAP_RESERVED0040 = 0x40 + MAP_RESERVED0080 = 0x80 + 
MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0xfc000000 + MFD_HUGE_SHIFT = 0x1a + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0x300d0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EMPTYDIR = 0x2000000000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_EXTLS = 0x4000000000 + MNT_EXTLSCERT = 0x8000000000 + MNT_EXTLSCERTUSER = 0x10000000000 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NOCOVER = 0x1000000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0xad8d0807e + MNT_USER = 0x8000 + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff + MNT_WAIT = 0x1 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x80000 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NET_RT_NHGRP = 0x7 + NET_RT_NHOP = 0x6 + NFDBITS = 0x40 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_CLOSE = 0x100 + NOTE_CLOSE_WRITE = 0x200 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FILE_POLL = 0x2 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MSECONDS = 0x2 + NOTE_NSECONDS = 0x8 + NOTE_OPEN = 0x80 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_READ = 0x400 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x4 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + 
O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x1000000 + O_EMPTY_PATH = 0x2000000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_PATH = 0x400000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_VERIFY = 0x200000 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_COREDUMP = 0x1d + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FIXEDMTU = 0x80000 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_BLACKHOLE = 0x40 + RT_DEFAULT_FIB = 0x0 + RT_DEFAULT_WEIGHT = 0x1 + RT_HAS_GW = 0x80 + RT_HAS_HEADER = 0x10 + RT_HAS_HEADER_BIT = 0x4 + RT_L2_ME = 0x4 + 
RT_L2_ME_BIT = 0x2 + RT_LLE_CACHE = 0x100 + RT_MAX_WEIGHT = 0xffffff + RT_MAY_LOOP = 0x8 + RT_MAY_LOOP_BIT = 0x3 + RT_REJECT = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_CREDS2 = 0x8 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPHYADDR = 0x80206949 + SIOCGDRVSPEC = 0xc028697b + SIOCGETSGCNT = 0xc0207210 + SIOCGETVIFCNT = 0xc028720f + SIOCGHIWAT = 0x40047301 + SIOCGHWADDR = 0xc020693e + SIOCGI2C = 0xc020693d + SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0x8020692c + SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0306938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 + SIOCGIFSTATUS = 0xc331693b + SIOCGIFXMEDIA = 0xc030698b + SIOCGLANPCP = 0xc0206998 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCGTUNFIB = 0xc020695e + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSDRVSPEC = 0x8028697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSTUNFIB = 0x8020695f + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 + SO_SETFIB = 0x1014 + 
SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB3 = 0x4 + TABDLY = 0x4 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_INIT_RATE = 0x458 + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_USE_RACK_RR = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 + TCP_CA_NAME_MAX = 0x10 + TCP_CCALGOOPT = 0x41 + TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DEFER_OPTIONS = 0x470 + TCP_DELACK = 0x48 + TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 + TCP_FAST_RSM_HACK = 0x471 + TCP_FIN_IS_RST = 0x49 + TCP_FUNCTION_BLK = 0x2000 + TCP_FUNCTION_NAME_LEN_MAX = 0x20 + TCP_HDWR_RATE_CAP = 0x46a + TCP_HDWR_UP_ONLY = 0x46c + TCP_IDLE_REDUCE = 0x46 + TCP_INFO = 0x20 + TCP_IWND_NB = 0x2b + TCP_IWND_NSEG = 0x2c + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOGID_CNT = 0x2e + TCP_LOG_ID_LEN = 0x40 + TCP_LOG_LIMIT = 0x4a + TCP_LOG_TAG = 0x2f + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXPEAKRATE = 0x45 + 
TCP_MAXSEG = 0x2 + TCP_MAXUNACKTIME = 0x44 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NO_PRR = 0x462 + TCP_PACING_RATE_CAP = 0x46b + TCP_PCAP_IN = 0x1000 + TCP_PCAP_OUT = 0x800 + TCP_PERF_INFO = 0x4e + TCP_PROC_ACCOUNTING = 0x4c + TCP_RACK_ABC_VAL = 0x46d + TCP_RACK_CHEAT_NOT_CONF_RATE = 0x459 + TCP_RACK_DO_DETECTION = 0x449 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_FORCE_MSEG = 0x45d + TCP_RACK_GP_INCREASE = 0x446 + TCP_RACK_GP_INCREASE_CA = 0x45a + TCP_RACK_GP_INCREASE_REC = 0x45c + TCP_RACK_GP_INCREASE_SS = 0x45b + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MBUF_QUEUE = 0x41a + TCP_RACK_MEASURE_CNT = 0x46f + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_NONRXT_CFG_RATE = 0x463 + TCP_RACK_NO_PUSH_AT_MAX = 0x466 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_RATE_CA = 0x45e + TCP_RACK_PACE_RATE_REC = 0x460 + TCP_RACK_PACE_RATE_SS = 0x45f + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PACE_TO_FILL = 0x467 + TCP_RACK_PACING_BETA = 0x472 + TCP_RACK_PACING_BETA_ECN = 0x473 + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROFILE = 0x469 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_RR_CONF = 0x459 + TCP_RACK_TIMER_SLOP = 0x474 + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 + TCP_REC_ABC_VAL = 0x46e + TCP_REMOTE_UDP_ENCAPS_PORT = 0x47 + TCP_REUSPORT_LB_NUMA = 0x402 + TCP_REUSPORT_LB_NUMA_CURDOM = -0x1 + TCP_REUSPORT_LB_NUMA_NODOM = -0x2 + TCP_RXTLS_ENABLE = 0x29 + TCP_RXTLS_MODE = 0x2a + TCP_SHARED_CWND_ALLOWED = 0x4b + TCP_SHARED_CWND_ENABLE = 0x464 + TCP_SHARED_CWND_TIME_LIMIT = 0x468 + TCP_STATS = 0x21 + TCP_TIMELY_DYN_ADJ = 0x465 + TCP_TLS_MODE_IFNET = 0x2 + TCP_TLS_MODE_NONE = 0x0 + TCP_TLS_MODE_SW = 0x1 + TCP_TLS_MODE_TOE = 0x3 + TCP_TXTLS_ENABLE = 0x27 + TCP_TXTLS_MODE = 0x28 + TCP_USER_LOG = 0x30 + TCP_USE_CMP_ACKS = 0x4d + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + 
TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECAPMODE = syscall.Errno(0x5e) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x61) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCAPABLE = syscall.Errno(0x5d) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5f) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x60) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + 
EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGLIBRT = syscall.Signal(0x21) + SIGLWP = syscall.Signal(0x20) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, 
"ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "ECANCELED", "operation canceled"}, + {86, "EILSEQ", "illegal byte sequence"}, + {87, "ENOATTR", "attribute not found"}, + {88, "EDOOFUS", "programming error"}, + {89, "EBADMSG", "bad message"}, + {90, "EMULTIHOP", "multihop attempted"}, + {91, "ENOLINK", "link has been severed"}, + {92, "EPROTO", "protocol error"}, + {93, "ENOTCAPABLE", "capabilities insufficient"}, + {94, "ECAPMODE", "not permitted in capability mode"}, + {95, "ENOTRECOVERABLE", "state not recoverable"}, + {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", 
"filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "unknown signal"}, + {33, "SIGLIBRT", "unknown signal"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index dfa9bd9384e22..785d693eb3285 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -140,6 +140,306 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUDIT_ADD = 0x3eb + AUDIT_ADD_RULE = 0x3f3 + AUDIT_ALWAYS = 0x2 + AUDIT_ANOM_ABEND = 0x6a5 + AUDIT_ANOM_CREAT = 0x6a7 + AUDIT_ANOM_LINK = 0x6a6 + AUDIT_ANOM_PROMISCUOUS = 0x6a4 + AUDIT_ARCH = 0xb + AUDIT_ARCH_AARCH64 = 0xc00000b7 + AUDIT_ARCH_ALPHA = 0xc0009026 + AUDIT_ARCH_ARCOMPACT = 0x4000005d + AUDIT_ARCH_ARCOMPACTBE = 0x5d + AUDIT_ARCH_ARCV2 = 0x400000c3 + AUDIT_ARCH_ARCV2BE = 0xc3 + AUDIT_ARCH_ARM = 0x40000028 + AUDIT_ARCH_ARMEB = 0x28 + AUDIT_ARCH_C6X = 0x4000008c + AUDIT_ARCH_C6XBE = 0x8c + AUDIT_ARCH_CRIS = 0x4000004c + AUDIT_ARCH_CSKY = 0x400000fc + AUDIT_ARCH_FRV = 0x5441 + AUDIT_ARCH_H8300 = 0x2e + AUDIT_ARCH_HEXAGON = 0xa4 + AUDIT_ARCH_I386 = 0x40000003 + AUDIT_ARCH_IA64 = 0xc0000032 + AUDIT_ARCH_LOONGARCH32 = 0x40000102 + AUDIT_ARCH_LOONGARCH64 = 0xc0000102 + AUDIT_ARCH_M32R = 0x58 + AUDIT_ARCH_M68K = 0x4 + AUDIT_ARCH_MICROBLAZE = 0xbd + AUDIT_ARCH_MIPS = 0x8 + AUDIT_ARCH_MIPS64 = 0x80000008 + AUDIT_ARCH_MIPS64N32 = 0xa0000008 + AUDIT_ARCH_MIPSEL = 0x40000008 + AUDIT_ARCH_MIPSEL64 = 0xc0000008 + AUDIT_ARCH_MIPSEL64N32 = 0xe0000008 + AUDIT_ARCH_NDS32 = 0x400000a7 + AUDIT_ARCH_NDS32BE = 0xa7 + AUDIT_ARCH_NIOS2 = 0x40000071 + AUDIT_ARCH_OPENRISC = 0x5c + AUDIT_ARCH_PARISC = 0xf + AUDIT_ARCH_PARISC64 = 0x8000000f + AUDIT_ARCH_PPC = 0x14 + AUDIT_ARCH_PPC64 = 0x80000015 + AUDIT_ARCH_PPC64LE = 0xc0000015 + AUDIT_ARCH_RISCV32 = 0x400000f3 + AUDIT_ARCH_RISCV64 = 0xc00000f3 + AUDIT_ARCH_S390 = 0x16 + AUDIT_ARCH_S390X = 0x80000016 + AUDIT_ARCH_SH = 0x2a + AUDIT_ARCH_SH64 = 0x8000002a + AUDIT_ARCH_SHEL = 0x4000002a + AUDIT_ARCH_SHEL64 = 0xc000002a + AUDIT_ARCH_SPARC = 0x2 + AUDIT_ARCH_SPARC64 = 0x8000002b + AUDIT_ARCH_TILEGX = 0xc00000bf + AUDIT_ARCH_TILEGX32 = 0x400000bf + AUDIT_ARCH_TILEPRO = 0x400000bc + AUDIT_ARCH_UNICORE = 0x4000006e + AUDIT_ARCH_X86_64 = 0xc000003e + AUDIT_ARCH_XTENSA = 0x5e + AUDIT_ARG0 = 0xc8 + AUDIT_ARG1 = 0xc9 + AUDIT_ARG2 = 0xca + AUDIT_ARG3 = 0xcb + AUDIT_AVC = 0x578 + AUDIT_AVC_PATH = 0x57a + AUDIT_BITMASK_SIZE = 0x40 + AUDIT_BIT_MASK = 0x8000000 + AUDIT_BIT_TEST = 0x48000000 + AUDIT_BPF = 0x536 + AUDIT_BPRM_FCAPS = 0x529 + AUDIT_CAPSET = 0x52a + AUDIT_CLASS_CHATTR = 0x2 + AUDIT_CLASS_CHATTR_32 = 0x3 + AUDIT_CLASS_DIR_WRITE = 0x0 + AUDIT_CLASS_DIR_WRITE_32 = 0x1 + AUDIT_CLASS_READ = 0x4 + AUDIT_CLASS_READ_32 = 0x5 + AUDIT_CLASS_SIGNAL = 0x8 + AUDIT_CLASS_SIGNAL_32 = 0x9 + AUDIT_CLASS_WRITE = 0x6 + AUDIT_CLASS_WRITE_32 = 0x7 + AUDIT_COMPARE_AUID_TO_EUID = 0x10 + AUDIT_COMPARE_AUID_TO_FSUID = 0xe + AUDIT_COMPARE_AUID_TO_OBJ_UID = 0x5 + AUDIT_COMPARE_AUID_TO_SUID = 0xf + AUDIT_COMPARE_EGID_TO_FSGID = 0x17 + AUDIT_COMPARE_EGID_TO_OBJ_GID = 0x4 + AUDIT_COMPARE_EGID_TO_SGID = 0x18 + AUDIT_COMPARE_EUID_TO_FSUID = 0x12 + AUDIT_COMPARE_EUID_TO_OBJ_UID = 0x3 + AUDIT_COMPARE_EUID_TO_SUID = 0x11 + AUDIT_COMPARE_FSGID_TO_OBJ_GID = 0x9 + 
AUDIT_COMPARE_FSUID_TO_OBJ_UID = 0x8 + AUDIT_COMPARE_GID_TO_EGID = 0x14 + AUDIT_COMPARE_GID_TO_FSGID = 0x15 + AUDIT_COMPARE_GID_TO_OBJ_GID = 0x2 + AUDIT_COMPARE_GID_TO_SGID = 0x16 + AUDIT_COMPARE_SGID_TO_FSGID = 0x19 + AUDIT_COMPARE_SGID_TO_OBJ_GID = 0x7 + AUDIT_COMPARE_SUID_TO_FSUID = 0x13 + AUDIT_COMPARE_SUID_TO_OBJ_UID = 0x6 + AUDIT_COMPARE_UID_TO_AUID = 0xa + AUDIT_COMPARE_UID_TO_EUID = 0xb + AUDIT_COMPARE_UID_TO_FSUID = 0xc + AUDIT_COMPARE_UID_TO_OBJ_UID = 0x1 + AUDIT_COMPARE_UID_TO_SUID = 0xd + AUDIT_CONFIG_CHANGE = 0x519 + AUDIT_CWD = 0x51b + AUDIT_DAEMON_ABORT = 0x4b2 + AUDIT_DAEMON_CONFIG = 0x4b3 + AUDIT_DAEMON_END = 0x4b1 + AUDIT_DAEMON_START = 0x4b0 + AUDIT_DEL = 0x3ec + AUDIT_DEL_RULE = 0x3f4 + AUDIT_DEVMAJOR = 0x64 + AUDIT_DEVMINOR = 0x65 + AUDIT_DIR = 0x6b + AUDIT_DM_CTRL = 0x53a + AUDIT_DM_EVENT = 0x53b + AUDIT_EGID = 0x6 + AUDIT_EOE = 0x528 + AUDIT_EQUAL = 0x40000000 + AUDIT_EUID = 0x2 + AUDIT_EVENT_LISTENER = 0x537 + AUDIT_EXE = 0x70 + AUDIT_EXECVE = 0x51d + AUDIT_EXIT = 0x67 + AUDIT_FAIL_PANIC = 0x2 + AUDIT_FAIL_PRINTK = 0x1 + AUDIT_FAIL_SILENT = 0x0 + AUDIT_FANOTIFY = 0x533 + AUDIT_FD_PAIR = 0x525 + AUDIT_FEATURE_BITMAP_ALL = 0x7f + AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT = 0x1 + AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME = 0x2 + AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND = 0x8 + AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH = 0x4 + AUDIT_FEATURE_BITMAP_FILTER_FS = 0x40 + AUDIT_FEATURE_BITMAP_LOST_RESET = 0x20 + AUDIT_FEATURE_BITMAP_SESSIONID_FILTER = 0x10 + AUDIT_FEATURE_CHANGE = 0x530 + AUDIT_FEATURE_LOGINUID_IMMUTABLE = 0x1 + AUDIT_FEATURE_ONLY_UNSET_LOGINUID = 0x0 + AUDIT_FEATURE_VERSION = 0x1 + AUDIT_FIELD_COMPARE = 0x6f + AUDIT_FILETYPE = 0x6c + AUDIT_FILTERKEY = 0xd2 + AUDIT_FILTER_ENTRY = 0x2 + AUDIT_FILTER_EXCLUDE = 0x5 + AUDIT_FILTER_EXIT = 0x4 + AUDIT_FILTER_FS = 0x6 + AUDIT_FILTER_PREPEND = 0x10 + AUDIT_FILTER_TASK = 0x1 + AUDIT_FILTER_TYPE = 0x5 + AUDIT_FILTER_URING_EXIT = 0x7 + AUDIT_FILTER_USER = 0x0 + AUDIT_FILTER_WATCH = 0x3 + AUDIT_FIRST_KERN_ANOM_MSG = 0x6a4 + AUDIT_FIRST_USER_MSG = 0x44c + AUDIT_FIRST_USER_MSG2 = 0x834 + AUDIT_FSGID = 0x8 + AUDIT_FSTYPE = 0x1a + AUDIT_FSUID = 0x4 + AUDIT_GET = 0x3e8 + AUDIT_GET_FEATURE = 0x3fb + AUDIT_GID = 0x5 + AUDIT_GREATER_THAN = 0x20000000 + AUDIT_GREATER_THAN_OR_EQUAL = 0x60000000 + AUDIT_INODE = 0x66 + AUDIT_INTEGRITY_DATA = 0x708 + AUDIT_INTEGRITY_EVM_XATTR = 0x70e + AUDIT_INTEGRITY_HASH = 0x70b + AUDIT_INTEGRITY_METADATA = 0x709 + AUDIT_INTEGRITY_PCR = 0x70c + AUDIT_INTEGRITY_POLICY_RULE = 0x70f + AUDIT_INTEGRITY_RULE = 0x70d + AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_IPC = 0x517 + AUDIT_IPC_SET_PERM = 0x51f + AUDIT_KERNEL = 0x7d0 + AUDIT_KERNEL_OTHER = 0x524 + AUDIT_KERN_MODULE = 0x532 + AUDIT_LAST_FEATURE = 0x1 + AUDIT_LAST_KERN_ANOM_MSG = 0x707 + AUDIT_LAST_USER_MSG = 0x4af + AUDIT_LAST_USER_MSG2 = 0xbb7 + AUDIT_LESS_THAN = 0x10000000 + AUDIT_LESS_THAN_OR_EQUAL = 0x50000000 + AUDIT_LIST = 0x3ea + AUDIT_LIST_RULES = 0x3f5 + AUDIT_LOGIN = 0x3ee + AUDIT_LOGINUID = 0x9 + AUDIT_LOGINUID_SET = 0x18 + AUDIT_MAC_CALIPSO_ADD = 0x58a + AUDIT_MAC_CALIPSO_DEL = 0x58b + AUDIT_MAC_CIPSOV4_ADD = 0x57f + AUDIT_MAC_CIPSOV4_DEL = 0x580 + AUDIT_MAC_CONFIG_CHANGE = 0x57d + AUDIT_MAC_IPSEC_ADDSA = 0x583 + AUDIT_MAC_IPSEC_ADDSPD = 0x585 + AUDIT_MAC_IPSEC_DELSA = 0x584 + AUDIT_MAC_IPSEC_DELSPD = 0x586 + AUDIT_MAC_IPSEC_EVENT = 0x587 + AUDIT_MAC_MAP_ADD = 0x581 + AUDIT_MAC_MAP_DEL = 0x582 + AUDIT_MAC_POLICY_LOAD = 0x57b + AUDIT_MAC_STATUS = 0x57c + AUDIT_MAC_UNLBL_ALLOW = 0x57e + AUDIT_MAC_UNLBL_STCADD = 0x588 + AUDIT_MAC_UNLBL_STCDEL = 0x589 + 
AUDIT_MAKE_EQUIV = 0x3f7 + AUDIT_MAX_FIELDS = 0x40 + AUDIT_MAX_FIELD_COMPARE = 0x19 + AUDIT_MAX_KEY_LEN = 0x100 + AUDIT_MESSAGE_TEXT_MAX = 0x2170 + AUDIT_MMAP = 0x52b + AUDIT_MQ_GETSETATTR = 0x523 + AUDIT_MQ_NOTIFY = 0x522 + AUDIT_MQ_OPEN = 0x520 + AUDIT_MQ_SENDRECV = 0x521 + AUDIT_MSGTYPE = 0xc + AUDIT_NEGATE = 0x80000000 + AUDIT_NETFILTER_CFG = 0x52d + AUDIT_NETFILTER_PKT = 0x52c + AUDIT_NEVER = 0x0 + AUDIT_NLGRP_MAX = 0x1 + AUDIT_NOT_EQUAL = 0x30000000 + AUDIT_NR_FILTERS = 0x8 + AUDIT_OBJ_GID = 0x6e + AUDIT_OBJ_LEV_HIGH = 0x17 + AUDIT_OBJ_LEV_LOW = 0x16 + AUDIT_OBJ_PID = 0x526 + AUDIT_OBJ_ROLE = 0x14 + AUDIT_OBJ_TYPE = 0x15 + AUDIT_OBJ_UID = 0x6d + AUDIT_OBJ_USER = 0x13 + AUDIT_OPENAT2 = 0x539 + AUDIT_OPERATORS = 0x78000000 + AUDIT_PATH = 0x516 + AUDIT_PERM = 0x6a + AUDIT_PERM_ATTR = 0x8 + AUDIT_PERM_EXEC = 0x1 + AUDIT_PERM_READ = 0x4 + AUDIT_PERM_WRITE = 0x2 + AUDIT_PERS = 0xa + AUDIT_PID = 0x0 + AUDIT_POSSIBLE = 0x1 + AUDIT_PPID = 0x12 + AUDIT_PROCTITLE = 0x52f + AUDIT_REPLACE = 0x531 + AUDIT_SADDR_FAM = 0x71 + AUDIT_SECCOMP = 0x52e + AUDIT_SELINUX_ERR = 0x579 + AUDIT_SESSIONID = 0x19 + AUDIT_SET = 0x3e9 + AUDIT_SET_FEATURE = 0x3fa + AUDIT_SGID = 0x7 + AUDIT_SID_UNSET = 0xffffffff + AUDIT_SIGNAL_INFO = 0x3f2 + AUDIT_SOCKADDR = 0x51a + AUDIT_SOCKETCALL = 0x518 + AUDIT_STATUS_BACKLOG_LIMIT = 0x10 + AUDIT_STATUS_BACKLOG_WAIT_TIME = 0x20 + AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL = 0x80 + AUDIT_STATUS_ENABLED = 0x1 + AUDIT_STATUS_FAILURE = 0x2 + AUDIT_STATUS_LOST = 0x40 + AUDIT_STATUS_PID = 0x4 + AUDIT_STATUS_RATE_LIMIT = 0x8 + AUDIT_SUBJ_CLR = 0x11 + AUDIT_SUBJ_ROLE = 0xe + AUDIT_SUBJ_SEN = 0x10 + AUDIT_SUBJ_TYPE = 0xf + AUDIT_SUBJ_USER = 0xd + AUDIT_SUCCESS = 0x68 + AUDIT_SUID = 0x3 + AUDIT_SYSCALL = 0x514 + AUDIT_SYSCALL_CLASSES = 0x10 + AUDIT_TIME_ADJNTPVAL = 0x535 + AUDIT_TIME_INJOFFSET = 0x534 + AUDIT_TRIM = 0x3f6 + AUDIT_TTY = 0x527 + AUDIT_TTY_GET = 0x3f8 + AUDIT_TTY_SET = 0x3f9 + AUDIT_UID = 0x1 + AUDIT_UID_UNSET = 0xffffffff + AUDIT_UNUSED_BITS = 0x7fffc00 + AUDIT_URINGOP = 0x538 + AUDIT_USER = 0x3ed + AUDIT_USER_AVC = 0x453 + AUDIT_USER_TTY = 0x464 + AUDIT_VERSION_BACKLOG_LIMIT = 0x1 + AUDIT_VERSION_BACKLOG_WAIT_TIME = 0x2 + AUDIT_VERSION_LATEST = 0x7f + AUDIT_WATCH = 0x69 + AUDIT_WATCH_INS = 0x3ef + AUDIT_WATCH_LIST = 0x3f1 + AUDIT_WATCH_REM = 0x3f0 AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B110 = 0x3 @@ -538,6 +838,55 @@ const ( EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EM_386 = 0x3 + EM_486 = 0x6 + EM_68K = 0x4 + EM_860 = 0x7 + EM_88K = 0x5 + EM_AARCH64 = 0xb7 + EM_ALPHA = 0x9026 + EM_ALTERA_NIOS2 = 0x71 + EM_ARCOMPACT = 0x5d + EM_ARCV2 = 0xc3 + EM_ARM = 0x28 + EM_BLACKFIN = 0x6a + EM_BPF = 0xf7 + EM_CRIS = 0x4c + EM_CSKY = 0xfc + EM_CYGNUS_M32R = 0x9041 + EM_CYGNUS_MN10300 = 0xbeef + EM_FRV = 0x5441 + EM_H8_300 = 0x2e + EM_HEXAGON = 0xa4 + EM_IA_64 = 0x32 + EM_LOONGARCH = 0x102 + EM_M32 = 0x1 + EM_M32R = 0x58 + EM_MICROBLAZE = 0xbd + EM_MIPS = 0x8 + EM_MIPS_RS3_LE = 0xa + EM_MIPS_RS4_BE = 0xa + EM_MN10300 = 0x59 + EM_NDS32 = 0xa7 + EM_NONE = 0x0 + EM_OPENRISC = 0x5c + EM_PARISC = 0xf + EM_PPC = 0x14 + EM_PPC64 = 0x15 + EM_RISCV = 0xf3 + EM_S390 = 0x16 + EM_S390_OLD = 0xa390 + EM_SH = 0x2a + EM_SPARC = 0x2 + EM_SPARC32PLUS = 0x12 + EM_SPARCV9 = 0x2b + EM_SPU = 0x17 + EM_TILEGX = 0xbf + EM_TILEPRO = 0xbc + EM_TI_C6000 = 0x8c + EM_UNICORE = 0x6e + EM_X86_64 = 0x3e + EM_XTENSA = 0x5e ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -2591,6 +2940,7 @@ const ( SOL_RAW = 0xff SOL_RDS = 0x114 SOL_RXRPC = 0x110 + SOL_SMC = 0x11e 
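The AUDIT_ARCH_* values added above are the architecture tokens the kernel places in audit records and in seccomp_data.arch, so they are what a seccomp filter or audit consumer compares against. A minimal sketch of that check, assuming a recent golang.org/x/sys that includes these constants; the archSupported helper is illustrative and not part of the library:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// archSupported reports whether the arch token taken from seccomp_data.arch
// (or from an audit record) is one this program knows how to handle.
func archSupported(arch uint32) bool {
	switch arch {
	case unix.AUDIT_ARCH_X86_64, unix.AUDIT_ARCH_AARCH64:
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(archSupported(unix.AUDIT_ARCH_X86_64))  // true in this sketch
	fmt.Println(archSupported(unix.AUDIT_ARCH_RISCV64)) // false in this sketch
}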
SOL_TCP = 0x6 SOL_TIPC = 0x10f SOL_TLS = 0x11a diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index e9d9997eeda96..039c4aa06c2cc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +922,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +937,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +947,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +967,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +984,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if 
e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1205,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,43 +1250,13 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), uintptr(dev>>32), 0) + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), uintptr(dev>>32), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1753,22 +1656,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1671,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff 
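With the FreeBSD 11 compatibility shims dropped in the generated wrappers above, Fstat, Fstatat, Fstatfs and Statfs are exposed directly against the ino64 Stat_t/Statfs_t layouts instead of going through the *_freebsd12 indirection. A small usage sketch, assuming a freebsd build of a recent golang.org/x/sys; the file descriptor and mount point are illustrative:

//go:build freebsd

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Stat_t
	// Stat stdin via the exported wrapper; no fstat_freebsd12 indirection remains.
	if err := unix.Fstat(0, &st); err != nil {
		fmt.Println("Fstat:", err)
		return
	}
	fmt.Printf("inode=%d mode=%#o size=%d\n", st.Ino, st.Mode, st.Size)

	var fs unix.Statfs_t
	if err := unix.Statfs("/", &fs); err != nil {
		fmt.Println("Statfs:", err)
		return
	}
	fmt.Printf("fstype=%s blocks=%d\n", unix.ByteSliceToString(fs.Fstypename[:]), fs.Blocks)
}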
--git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index edd373b1a562e..0535d3cfdf2bd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +922,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +937,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +947,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +967,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +984,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return 
-} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1205,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,22 +1250,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1347,21 +1265,6 @@ func mknodat(fd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1753,22 +1656,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1671,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 82e9764b25710..1018b52217041 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -351,22 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ -404,6 +388,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data int) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +922,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +937,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) 
{ +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +947,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +967,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +984,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1205,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,43 +1250,13 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := 
Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, uintptr(dev), uintptr(dev>>32)) if e1 != 0 { err = errnoErr(e1) } @@ -1753,22 +1656,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1671,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index a6479acd1fc84..3802f4b379a5d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +922,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +937,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat 
*Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +947,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +967,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +984,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1205,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,22 +1250,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1347,21 +1265,6 @@ func mknodat(fd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) 
- } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1753,22 +1656,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1671,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go new file mode 100644 index 0000000000000..8a2db7da9f3eb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -0,0 +1,1889 @@ +// go run mksyscall.go -tags freebsd,riscv64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_riscv64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build freebsd && riscv64 +// +build freebsd,riscv64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CapEnter() (err error) { + _, _, e1 := Syscall(SYS_CAP_ENTER, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsGet(version int, fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS___CAP_RIGHTS_GET, uintptr(version), uintptr(fd), uintptr(unsafe.Pointer(rightsp))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsLimit(fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS_CAP_RIGHTS_LIMIT, uintptr(fd), uintptr(unsafe.Pointer(rightsp)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 
0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return 
+} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path 
string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd 
int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { 
+ err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(fd int, 
path string, mode uint32, dev uint64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(fdat), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath 
string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 59d5dfc209222..4e0d96107b9eb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - 
SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 
301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int 
nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, 
void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo 
*sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, 
cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 342d471d2eb1a..01636b838d30f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int 
mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t 
len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { 
int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + 
SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int 
mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct 
timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index e2e3d72c5b04f..ad99bc106a86f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - 
SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 
301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int 
nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, 
void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo 
*sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, 
cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 61ad5ca3c19b6..89dcc42747657 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int 
mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t 
len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { 
int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + 
SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int 
mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct 
timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go new file mode 100644 index 0000000000000..ee37aaa0c906a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go @@ -0,0 +1,394 @@ +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +package unix + +const ( + // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int + SYS_EXIT = 1 // { void sys_exit(int rval); } exit sys_exit_args void + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); } + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_BREAK = 17 // { caddr_t break(char *nsize); } + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, struct sockaddr * __restrict from, __socklen_t * __restrict fromlenaddr); } + SYS_ACCEPT = 30 // { int accept(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen); } + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } + SYS_ACCESS = 33 // { int access(char *path, int amode); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); } + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); } + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); } + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } + SYS_UMASK = 60 // { int umask(int newmask); } + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } + SYS_VFORK = 66 // { int vfork(void); } + 
SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); } + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); } + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); } + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); } + SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); } + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); } + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); } + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); } + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); } + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); } + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); } + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); } + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); } + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); } + SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); } + SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); } + SYS_LGETFH = 160 // { int lgetfh(char *fname, struct fhandle *fhp); } + SYS_GETFH = 161 // { 
int getfh(char *fname, struct fhandle *fhp); } + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); } + SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } + SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } + SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } + SYS_SETFIB = 175 // { int setfib(int fibnum); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); } + SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } + SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } + SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); } + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct itimerspec *value); } + SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, 
clockid_t *clock_id); } + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } + SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } + SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); } + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); } + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, sigset_t *oset); } + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const 
sigset_t *set, siginfo_t *info, const struct timespec *timeout); } + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, siginfo_t *info); } + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, acl_type_t type); } + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); } + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, struct mac *mac_p); } + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, struct mac *mac_p); } + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, struct mac *mac_p); } + SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); } + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); } + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } + SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } + SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } + SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } + 
SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); } + SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, unsigned int value); } + SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, const char *name, int oflag, mode_t mode, unsigned int value); } + SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); } + SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); } + SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); } + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } + SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, acl_type_t type); } + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, int *sig); } + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, int flags); } + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, u_int length); } + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + 
SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, lwpid_t lwpid, struct rtprio *rtp); } + SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, int whence); } + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, mode_t mode); } + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, cpusetid_t setid); } + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); } + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); } + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *mask); } + SYS_FACCESSAT = 489 // { int faccessat(int fd, char 
*path, int amode, int flag); } + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, int fd, cap_rights_t *rightsp); } + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *sm); } + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, size_t namelen); } + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, off_t offset, off_t len); } + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, off_t len, int advice); } + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); } + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, cap_rights_t *rightsp); } + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, const u_long 
*cmds, size_t ncmds); } + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, u_long *cmds, size_t maxcmds); } + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, uint32_t fcntlrights); } + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, uint32_t *fcntlrightsp); } + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, int namelen); } + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, int namelen); } + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, u_long flags, int atflag); } + SYS_ACCEPT4 = 541 // { int accept4(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen, int flags); } + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, int com, void *data); } + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } + SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } + SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 4eec078e52490..dea0c9a607d83 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -90,27 +90,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid 
uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec - _ [8]byte -} - type Statfs_t struct { Version uint32 Type uint32 @@ -136,31 +115,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -181,14 +135,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -337,41 +283,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -432,6 +346,8 @@ type FpReg struct { Pad [64]uint8 } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 Offs *byte @@ -444,8 +360,9 @@ type Kevent_t struct { Filter int16 Flags uint16 Fflags uint32 - Data int32 + Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7622904a532f4..da0ea0d608a89 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -86,26 +86,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -131,31 +111,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -177,14 +132,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -333,41 +280,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - 
PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -435,6 +350,8 @@ type FpReg struct { Spare [12]uint64 } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 Offs *byte @@ -449,6 +366,7 @@ type Kevent_t struct { Fflags uint32 Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 19223ce8ecf90..da8f7404509c0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -33,7 +33,7 @@ type Timeval struct { _ [4]byte } -type Time_t int32 +type Time_t int64 type Rusage struct { Utime Timeval @@ -88,26 +88,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -133,31 +113,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -179,14 +134,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -335,41 +282,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -386,15 +301,15 @@ type PtraceLwpInfoStruct struct { } type __Siginfo struct { - Signo int32 - Errno int32 - Code int32 - Pid int32 - Uid uint32 - Status int32 - Addr *byte - Value [4]byte - X_reason [32]byte + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr 
*byte + Value [4]byte + _ [32]byte } type Sigset_t struct { @@ -402,16 +317,22 @@ type Sigset_t struct { } type Reg struct { - R [13]uint32 - R_sp uint32 - R_lr uint32 - R_pc uint32 - R_cpsr uint32 + R [13]uint32 + Sp uint32 + Lr uint32 + Pc uint32 + Cpsr uint32 } type FpReg struct { - Fpr_fpsr uint32 - Fpr [8][3]uint32 + Fpsr uint32 + Fpr [8]FpExtendedPrecision +} + +type FpExtendedPrecision struct { + Exponent uint32 + Mantissa_hi uint32 + Mantissa_lo uint32 } type PtraceIoDesc struct { @@ -426,8 +347,11 @@ type Kevent_t struct { Filter int16 Flags uint16 Fflags uint32 - Data int32 + _ [4]byte + Data int64 Udata *byte + _ [4]byte + Ext [4]uint64 } type FdSet struct { @@ -453,7 +377,7 @@ type ifMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Data ifData } @@ -464,7 +388,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -532,7 +455,7 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Metric int32 } @@ -543,7 +466,7 @@ type IfmaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 } type IfAnnounceMsghdr struct { @@ -560,7 +483,7 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte + _ uint16 Flags int32 Addrs int32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 8e3e33f679058..d69988e5e58e9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -86,26 +86,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -131,31 +111,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -177,14 +132,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -333,39 +280,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -413,6 +330,8 @@ type FpReg struct { _ [8]byte } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 
Offs *byte @@ -427,6 +346,7 @@ type Kevent_t struct { Fflags uint32 Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go new file mode 100644 index 0000000000000..d6fd9e88382e5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -0,0 +1,626 @@ +// cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Time_t int64 + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + _statfsVersion = 0x20140518 + _dirblksiz = 0x400 +) + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 +} + +type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 + Sysid int32 + _ [4]byte +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Pad0 uint8 + Namlen uint16 + Pad1 uint16 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [46]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte 
/* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x58 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type PtraceLwpInfoStruct struct { + Lwpid int32 + Event int32 + Flags int32 + Sigmask Sigset_t + Siglist Sigset_t + Siginfo __Siginfo + Tdname [20]int8 + Child_pid int32 + Syscall_code uint32 + Syscall_narg uint32 +} + +type __Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [8]byte + _ [40]byte +} + +type Sigset_t struct { + Val [4]uint32 +} + +type Reg struct { + Ra uint64 + Sp uint64 + Gp uint64 + Tp uint64 + T [7]uint64 + S [12]uint64 + A [8]uint64 + Sepc uint64 + Sstatus uint64 +} + +type FpReg struct { + X [32][2]uint64 + Fcsr uint64 +} + +type FpExtendedPrecision struct{} + +type PtraceIoDesc struct { + Op int32 + Offs *byte + Addr *byte + Len uint64 +} + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte + Ext [4]uint64 +} + +type FdSet struct { + Bits [16]uint64 +} + +const ( + sizeofIfMsghdr = 0xa8 + SizeofIfMsghdr = 0xa8 + sizeofIfData = 0x98 + SizeofIfData = 0x98 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x98 + SizeofRtMetrics = 0x70 +) + +type ifMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 + Data ifData +} + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Data IfData +} + +type ifData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Datalen uint16 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Hwassist uint64 + _ [8]byte + _ [16]byte +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Spare_char1 uint8 + Spare_char2 uint8 + Datalen uint8 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Hwassist uint64 + Epoch int64 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + 
Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ uint16 + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint64 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Hopcount uint64 + Expire uint64 + Recvpipe uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Pksent uint64 + Weight uint64 + Nhidx uint64 + Filler [2]uint64 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfZbuf = 0x18 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x20 + SizeofBpfZbufHeader = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfZbuf struct { + Bufa *byte + Bufb *byte + Buflen uint64 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [6]byte +} + +type BpfZbufHeader struct { + Kernel_gen uint32 + Kernel_len uint32 + User_gen uint32 + _ [5]uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_EACCESS = 0x100 + AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type CapRights struct { + Rights [2]uint64 +} + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Spare int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index e62611e5331a9..86984798754dd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -5594,3 +5594,8 @@ const ( FR_ACT_UNREACHABLE = 0x7 FR_ACT_PROHIBIT = 0x8 ) + +const ( + AUDIT_NLGRP_NONE = 0x0 + AUDIT_NLGRP_READLOG = 0x1 +) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 636e5de60e32f..be3ec2bd4676d 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -861,6 +861,7 @@ const socket_error = uintptr(^uint32(0)) //sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar +//sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = 
iphlpapi.GetBestInterfaceEx // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. @@ -1045,6 +1046,14 @@ func Connect(fd Handle, sa Sockaddr) (err error) { return connect(fd, ptr, n) } +func GetBestInterfaceEx(sa Sockaddr, pdwBestIfIndex *uint32) (err error) { + ptr, _, err := sa.sockaddr() + if err != nil { + return err + } + return getBestInterfaceEx(ptr, pdwBestIfIndex) +} + func Getsockname(fd Handle) (sa Sockaddr, err error) { var rsa RawSockaddrAny l := int32(unsafe.Sizeof(rsa)) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index e19471c6a852f..f9eaca528ed7d 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -160,6 +160,10 @@ const ( MAX_COMPUTERNAME_LENGTH = 15 + MAX_DHCPV6_DUID_LENGTH = 130 + + MAX_DNS_SUFFIX_STRING_LENGTH = 256 + TIME_ZONE_ID_UNKNOWN = 0 TIME_ZONE_ID_STANDARD = 1 @@ -2000,27 +2004,62 @@ type IpAdapterPrefix struct { } type IpAdapterAddresses struct { - Length uint32 - IfIndex uint32 - Next *IpAdapterAddresses - AdapterName *byte - FirstUnicastAddress *IpAdapterUnicastAddress - FirstAnycastAddress *IpAdapterAnycastAddress - FirstMulticastAddress *IpAdapterMulticastAddress - FirstDnsServerAddress *IpAdapterDnsServerAdapter - DnsSuffix *uint16 - Description *uint16 - FriendlyName *uint16 - PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte - PhysicalAddressLength uint32 - Flags uint32 - Mtu uint32 - IfType uint32 - OperStatus uint32 - Ipv6IfIndex uint32 - ZoneIndices [16]uint32 - FirstPrefix *IpAdapterPrefix - /* more fields might be present here. */ + Length uint32 + IfIndex uint32 + Next *IpAdapterAddresses + AdapterName *byte + FirstUnicastAddress *IpAdapterUnicastAddress + FirstAnycastAddress *IpAdapterAnycastAddress + FirstMulticastAddress *IpAdapterMulticastAddress + FirstDnsServerAddress *IpAdapterDnsServerAdapter + DnsSuffix *uint16 + Description *uint16 + FriendlyName *uint16 + PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte + PhysicalAddressLength uint32 + Flags uint32 + Mtu uint32 + IfType uint32 + OperStatus uint32 + Ipv6IfIndex uint32 + ZoneIndices [16]uint32 + FirstPrefix *IpAdapterPrefix + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + FirstWinsServerAddress *IpAdapterWinsServerAddress + FirstGatewayAddress *IpAdapterGatewayAddress + Ipv4Metric uint32 + Ipv6Metric uint32 + Luid uint64 + Dhcpv4Server SocketAddress + CompartmentId uint32 + NetworkGuid GUID + ConnectionType uint32 + TunnelType uint32 + Dhcpv6Server SocketAddress + Dhcpv6ClientDuid [MAX_DHCPV6_DUID_LENGTH]byte + Dhcpv6ClientDuidLength uint32 + Dhcpv6Iaid uint32 + FirstDnsSuffix *IpAdapterDNSSuffix +} + +type IpAdapterWinsServerAddress struct { + Length uint32 + Reserved uint32 + Next *IpAdapterWinsServerAddress + Address SocketAddress +} + +type IpAdapterGatewayAddress struct { + Length uint32 + Reserved uint32 + Next *IpAdapterGatewayAddress + Address SocketAddress +} + +type IpAdapterDNSSuffix struct { + Next *IpAdapterDNSSuffix + String [MAX_DNS_SUFFIX_STRING_LENGTH]uint16 } const ( diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 68f52c1e61e9e..678262cda177d 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -177,6 +177,7 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procGetAdaptersAddresses = 
modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -1539,6 +1540,14 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { return } +func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetIfEntry(pIfRow *MibIfRow) (errcode error) { r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) if r0 != 0 { diff --git a/vendor/golang.org/x/text/AUTHORS b/vendor/golang.org/x/text/AUTHORS deleted file mode 100644 index 15167cd746c56..0000000000000 --- a/vendor/golang.org/x/text/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/text/CONTRIBUTORS b/vendor/golang.org/x/text/CONTRIBUTORS deleted file mode 100644 index 1c4577e968061..0000000000000 --- a/vendor/golang.org/x/text/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/text/encoding/ianaindex/tables.go b/vendor/golang.org/x/text/encoding/ianaindex/tables.go index cec6a0407bf93..921bb3b4b325d 100644 --- a/vendor/golang.org/x/text/encoding/ianaindex/tables.go +++ b/vendor/golang.org/x/text/encoding/ianaindex/tables.go @@ -141,6 +141,7 @@ const ( enc1018 enc1019 enc1020 + enc1021 enc2000 enc2001 enc2002 @@ -265,7 +266,7 @@ const ( numIANA ) -var ianaToMIB = []identifier.MIB{ // 257 elements +var ianaToMIB = []identifier.MIB{ // 258 elements // Entry 0 - 3F 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, 0x0010, 0x0011, 0x0012, @@ -286,27 +287,27 @@ var ianaToMIB = []identifier.MIB{ // 257 elements 0x03ed, 0x03ee, 0x03ef, 0x03f0, 0x03f1, 0x03f2, 0x03f3, 0x03f4, // Entry 80 - BF 0x03f5, 0x03f6, 0x03f7, 0x03f8, 0x03f9, 0x03fa, 0x03fb, 0x03fc, - 0x07d0, 0x07d1, 0x07d2, 0x07d3, 0x07d4, 0x07d5, 0x07d6, 0x07d7, - 0x07d8, 0x07d9, 0x07da, 0x07db, 0x07dc, 0x07dd, 0x07de, 0x07df, - 0x07e0, 0x07e1, 0x07e2, 0x07e3, 0x07e4, 0x07e5, 0x07e6, 0x07e7, - 0x07e8, 0x07e9, 0x07ea, 0x07eb, 0x07ec, 0x07ed, 0x07ee, 0x07ef, - 0x07f0, 0x07f1, 0x07f2, 0x07f3, 0x07f4, 0x07f5, 0x07f6, 0x07f7, - 0x07f8, 0x07f9, 0x07fa, 0x07fb, 0x07fc, 0x07fd, 0x07fe, 0x07ff, - 0x0800, 0x0801, 0x0802, 0x0803, 0x0804, 0x0805, 0x0806, 0x0807, + 0x03fd, 0x07d0, 0x07d1, 0x07d2, 0x07d3, 0x07d4, 0x07d5, 0x07d6, + 0x07d7, 0x07d8, 0x07d9, 0x07da, 0x07db, 0x07dc, 0x07dd, 0x07de, + 0x07df, 0x07e0, 0x07e1, 0x07e2, 0x07e3, 0x07e4, 0x07e5, 0x07e6, + 0x07e7, 0x07e8, 0x07e9, 0x07ea, 0x07eb, 0x07ec, 0x07ed, 0x07ee, + 0x07ef, 0x07f0, 0x07f1, 0x07f2, 0x07f3, 0x07f4, 0x07f5, 0x07f6, + 0x07f7, 0x07f8, 0x07f9, 0x07fa, 0x07fb, 0x07fc, 0x07fd, 0x07fe, + 0x07ff, 0x0800, 0x0801, 0x0802, 0x0803, 0x0804, 0x0805, 0x0806, // Entry C0 - FF - 0x0808, 0x0809, 0x080a, 0x080b, 0x080c, 0x080d, 0x080e, 
0x080f, - 0x0810, 0x0811, 0x0812, 0x0813, 0x0814, 0x0815, 0x0816, 0x0817, - 0x0818, 0x0819, 0x081a, 0x081b, 0x081c, 0x081d, 0x081e, 0x081f, - 0x0820, 0x0821, 0x0822, 0x0823, 0x0824, 0x0825, 0x0826, 0x0827, - 0x0828, 0x0829, 0x082a, 0x082b, 0x082c, 0x082d, 0x082e, 0x082f, - 0x0830, 0x0831, 0x0832, 0x0833, 0x0834, 0x0835, 0x0836, 0x0837, - 0x0838, 0x0839, 0x083a, 0x083b, 0x083c, 0x083d, 0x08ca, 0x08cb, - 0x08cc, 0x08cd, 0x08ce, 0x08cf, 0x08d0, 0x08d1, 0x08d2, 0x08d3, + 0x0807, 0x0808, 0x0809, 0x080a, 0x080b, 0x080c, 0x080d, 0x080e, + 0x080f, 0x0810, 0x0811, 0x0812, 0x0813, 0x0814, 0x0815, 0x0816, + 0x0817, 0x0818, 0x0819, 0x081a, 0x081b, 0x081c, 0x081d, 0x081e, + 0x081f, 0x0820, 0x0821, 0x0822, 0x0823, 0x0824, 0x0825, 0x0826, + 0x0827, 0x0828, 0x0829, 0x082a, 0x082b, 0x082c, 0x082d, 0x082e, + 0x082f, 0x0830, 0x0831, 0x0832, 0x0833, 0x0834, 0x0835, 0x0836, + 0x0837, 0x0838, 0x0839, 0x083a, 0x083b, 0x083c, 0x083d, 0x08ca, + 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x08cf, 0x08d0, 0x08d1, 0x08d2, // Entry 100 - 13F - 0x08d4, -} // Size: 538 bytes + 0x08d3, 0x08d4, +} // Size: 540 bytes -var ianaNames = []string{ // 257 elements +var ianaNames = []string{ // 258 elements "US-ASCII", "\vISO-8859-1ISO_8859-1:1987", "\vISO-8859-2ISO_8859-2:1987", @@ -443,6 +444,7 @@ var ianaNames = []string{ // 257 elements "UTF-32BE", "UTF-32LE", "BOCU-1", + "UTF-7-IMAP", "ISO-8859-1-Windows-3.0-Latin-1", "ISO-8859-1-Windows-3.1-Latin-1", "ISO-8859-2-Windows-Latin-2", @@ -564,9 +566,9 @@ var ianaNames = []string{ // 257 elements "windows-1258", "TIS-620", "CP50220", -} // Size: 7088 bytes +} // Size: 7114 bytes -var mibNames = []string{ // 257 elements +var mibNames = []string{ // 258 elements "ASCII", "ISOLatin1", "ISOLatin2", @@ -703,6 +705,7 @@ var mibNames = []string{ // 257 elements "UTF32BE", "UTF32LE", "BOCU-1", + "UTF7IMAP", "Windows30Latin1", "Windows31Latin1", "Windows31Latin2", @@ -824,7 +827,7 @@ var mibNames = []string{ // 257 elements "windows1258", "TIS620", "CP50220", -} // Size: 6776 bytes +} // Size: 6800 bytes // TODO: Instead of using a map, we could use binary search strings doing // on-the fly lower-casing per character. This allows to always avoid @@ -1692,6 +1695,10 @@ var ianaAliases = map[string]int{ "csbocu1": enc1020, "csBOCU-1": enc1020, "csbocu-1": enc1020, + "UTF-7-IMAP": enc1021, + "utf-7-imap": enc1021, + "csUTF7IMAP": enc1021, + "csutf7imap": enc1021, "ISO-8859-1-Windows-3.0-Latin-1": enc2000, "iso-8859-1-windows-3.0-latin-1": enc2000, "csWindows30Latin1": enc2000, @@ -2345,4 +2352,4 @@ var ianaAliases = map[string]int{ "cscp50220": enc2260, } -// Total table size 14402 bytes (14KiB); checksum: CEBAA10C +// Total table size 14454 bytes (14KiB); checksum: 9095144D diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/mib.go b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go index fc7df1bc716e1..351fb86e298b6 100644 --- a/vendor/golang.org/x/text/encoding/internal/identifier/mib.go +++ b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go @@ -905,6 +905,14 @@ const ( // https://www.unicode.org/notes/tn6/ BOCU1 MIB = 1020 + // UTF7IMAP is the MIB identifier with IANA name UTF-7-IMAP. + // + // Note: This charset is used to encode Unicode in IMAP mailbox names; + // see section 5.1.3 of rfc3501 . It should never be used + // outside this context. A name has been assigned so that charset processing + // implementations can refer to it in a consistent way. 
+ UTF7IMAP MIB = 1021 + // Windows30Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.0-Latin-1. // // Extended ISO 8859-1 Latin-1 for Windows 3.0. diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go index b89c45b03d842..0e0fabfd6b1ef 100644 --- a/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go +++ b/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go @@ -55,6 +55,8 @@ loop: // Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC // as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk // says to treat "gbk" as Code Page 936. + // GBK’s decoder is gb18030’s decoder. https://encoding.spec.whatwg.org/#gbk-decoder + // If byte is 0x80, return code point U+20AC. https://encoding.spec.whatwg.org/#gb18030-decoder case c0 == 0x80: r, size = '€', 1 @@ -180,7 +182,9 @@ func (e gbkEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err // Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC // as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk // says to treat "gbk" as Code Page 936. - if r == '€' { + // GBK’s encoder is gb18030’s encoder with its _is GBK_ set to true. https://encoding.spec.whatwg.org/#gbk-encoder + // If _is GBK_ is true and code point is U+20AC, return byte 0x80. https://encoding.spec.whatwg.org/#gb18030-encoder + if !e.gb18030 && r == '€' { r = 0x80 goto write1 } diff --git a/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go b/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go index 575cea8707b80..e5c53b1b3e07d 100644 --- a/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go +++ b/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go @@ -74,7 +74,7 @@ type AcceptRange struct { // AcceptRanges is a slice of AcceptRange values. For a given byte sequence b // -// AcceptRanges[First[b[0]]>>AcceptShift] +// AcceptRanges[First[b[0]]>>AcceptShift] // // will give the value of AcceptRange for the multi-byte UTF-8 sequence starting // at b[0]. diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go index 71933696f592a..930e87fedb0f7 100644 --- a/vendor/golang.org/x/text/runes/runes.go +++ b/vendor/golang.org/x/text/runes/runes.go @@ -33,7 +33,7 @@ func In(rt *unicode.RangeTable) Set { return setFunc(func(r rune) bool { return unicode.Is(rt, r) }) } -// In creates a Set with a Contains method that returns true for all runes not +// NotIn creates a Set with a Contains method that returns true for all runes not // in the given RangeTable. func NotIn(rt *unicode.RangeTable) Set { return setFunc(func(r rune) bool { return !unicode.Is(rt, r) }) diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index e4c0811016c2a..9d2ae547b5ed4 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -193,14 +193,14 @@ func (p *paragraph) run() { // // At the end of this function: // -// - The member variable matchingPDI is set to point to the index of the -// matching PDI character for each isolate initiator character. If there is -// no matching PDI, it is set to the length of the input text. For other -// characters, it is set to -1. -// - The member variable matchingIsolateInitiator is set to point to the -// index of the matching isolate initiator character for each PDI character. 
-// If there is no matching isolate initiator, or the character is not a PDI, -// it is set to -1. +// - The member variable matchingPDI is set to point to the index of the +// matching PDI character for each isolate initiator character. If there is +// no matching PDI, it is set to the length of the input text. For other +// characters, it is set to -1. +// - The member variable matchingIsolateInitiator is set to point to the +// index of the matching isolate initiator character for each PDI character. +// If there is no matching isolate initiator, or the character is not a PDI, +// it is set to -1. func (p *paragraph) determineMatchingIsolates() { p.matchingPDI = make([]int, p.Len()) p.matchingIsolateInitiator = make([]int, p.Len()) @@ -435,7 +435,7 @@ func maxLevel(a, b level) level { } // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, -// either L or R, for each isolating run sequence. +// either L or R, for each isolating run sequence. func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { length := len(indexes) types := make([]Class, length) @@ -495,9 +495,9 @@ func (s *isolatingRunSequence) resolveWeakTypes() { if t == NSM { s.types[i] = precedingCharacterType } else { - if t.in(LRI, RLI, FSI, PDI) { - precedingCharacterType = ON - } + // if t.in(LRI, RLI, FSI, PDI) { + // precedingCharacterType = ON + // } precedingCharacterType = t } } @@ -905,7 +905,7 @@ func (p *paragraph) getLevels(linebreaks []int) []level { // Lines are concatenated from left to right. So for example, the fifth // character from the left on the third line is // -// getReordering(linebreaks)[linebreaks[1] + 4] +// getReordering(linebreaks)[linebreaks[1] + 4] // // (linebreaks[1] is the position after the last character of the second // line, which is also the index of the first character on the third line, diff --git a/vendor/golang.org/x/text/unicode/bidi/trieval.go b/vendor/golang.org/x/text/unicode/bidi/trieval.go index 4c459c4b72e0e..6a796e2214c69 100644 --- a/vendor/golang.org/x/text/unicode/bidi/trieval.go +++ b/vendor/golang.org/x/text/unicode/bidi/trieval.go @@ -37,18 +37,6 @@ const ( unknownClass = ^Class(0) ) -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - // A trie entry has the following bits: // 7..5 XOR mask for brackets // 4 1: Bracket open, 0: Bracket close diff --git a/vendor/golang.org/x/text/unicode/norm/forminfo.go b/vendor/golang.org/x/text/unicode/norm/forminfo.go index 526c7033ac464..d69ccb4f97611 100644 --- a/vendor/golang.org/x/text/unicode/norm/forminfo.go +++ b/vendor/golang.org/x/text/unicode/norm/forminfo.go @@ -110,10 +110,11 @@ func (p Properties) BoundaryAfter() bool { } // We pack quick check data in 4 bits: -// 5: Combines forward (0 == false, 1 == true) -// 4..3: NFC_QC Yes(00), No (10), or Maybe (11) -// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition. -// 1..0: Number of trailing non-starters. +// +// 5: Combines forward (0 == false, 1 == true) +// 4..3: NFC_QC Yes(00), No (10), or Maybe (11) +// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition. +// 1..0: Number of trailing non-starters. 
// // When all 4 bits are zero, the character is inert, meaning it is never // influenced by normalization. diff --git a/vendor/golang.org/x/text/unicode/norm/normalize.go b/vendor/golang.org/x/text/unicode/norm/normalize.go index 95efcf26e81d7..4747ad07a839c 100644 --- a/vendor/golang.org/x/text/unicode/norm/normalize.go +++ b/vendor/golang.org/x/text/unicode/norm/normalize.go @@ -18,16 +18,17 @@ import ( // A Form denotes a canonical representation of Unicode code points. // The Unicode-defined normalization and equivalence forms are: // -// NFC Unicode Normalization Form C -// NFD Unicode Normalization Form D -// NFKC Unicode Normalization Form KC -// NFKD Unicode Normalization Form KD +// NFC Unicode Normalization Form C +// NFD Unicode Normalization Form D +// NFKC Unicode Normalization Form KC +// NFKD Unicode Normalization Form KD // // For a Form f, this documentation uses the notation f(x) to mean // the bytes or string x converted to the given form. // A position n in x is called a boundary if conversion to the form can // proceed independently on both sides: -// f(x) == append(f(x[0:n]), f(x[n:])...) +// +// f(x) == append(f(x[0:n]), f(x[n:])...) // // References: https://unicode.org/reports/tr15/ and // https://unicode.org/notes/tn5/. diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index 96a130d30e9e2..9115ef257e83c 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -7315,7 +7315,7 @@ const recompMapPacked = "" + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E - "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00v\x03#\x00\x00\x1e\x7f" + // 0x00760323: 0x00001E7F "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 @@ -7342,7 +7342,7 @@ const recompMapPacked = "" + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 - "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x01\x7f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go index 186b1d4efac5a..cd9d91cafbb88 100644 --- a/vendor/golang.org/x/text/width/tables10.0.0.go +++ b/vendor/golang.org/x/text/width/tables10.0.0.go @@ -1146,21 +1146,31 @@ var widthIndex = [1408]uint8{ } // inverseData contains 4-byte entries of the following format: -// <0 padding> +// +// <0 padding> +// // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // UTF-8 encoding of the original rune. Mappings often have the following // pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// // By xor-ing the last byte the same entry can be shared by many mappings. This // reduces the total number of distinct entries by about two thirds. 
// The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } +// +// { 0x01, 0xE0, 0x00, 0x00 } +// // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. +// +// E0 ^ A1 = 41. +// // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. +// +// E0 ^ A2 = 42. +// // Note that because of the xor-ing, the byte sequence stored in the entry is // not valid UTF-8. var inverseData = [150][4]byte{ diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go index 990f7622f1755..327eaef9b7016 100644 --- a/vendor/golang.org/x/text/width/tables11.0.0.go +++ b/vendor/golang.org/x/text/width/tables11.0.0.go @@ -1158,21 +1158,31 @@ var widthIndex = [1408]uint8{ } // inverseData contains 4-byte entries of the following format: -// <0 padding> +// +// <0 padding> +// // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // UTF-8 encoding of the original rune. Mappings often have the following // pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// // By xor-ing the last byte the same entry can be shared by many mappings. This // reduces the total number of distinct entries by about two thirds. // The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } +// +// { 0x01, 0xE0, 0x00, 0x00 } +// // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. +// +// E0 ^ A1 = 41. +// // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. +// +// E0 ^ A2 = 42. +// // Note that because of the xor-ing, the byte sequence stored in the entry is // not valid UTF-8. var inverseData = [150][4]byte{ diff --git a/vendor/golang.org/x/text/width/tables12.0.0.go b/vendor/golang.org/x/text/width/tables12.0.0.go index 85296297e38c9..5c14ade6d9b1c 100644 --- a/vendor/golang.org/x/text/width/tables12.0.0.go +++ b/vendor/golang.org/x/text/width/tables12.0.0.go @@ -1178,21 +1178,31 @@ var widthIndex = [1408]uint8{ } // inverseData contains 4-byte entries of the following format: -// <0 padding> +// +// <0 padding> +// // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // UTF-8 encoding of the original rune. Mappings often have the following // pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// // By xor-ing the last byte the same entry can be shared by many mappings. This // reduces the total number of distinct entries by about two thirds. // The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } +// +// { 0x01, 0xE0, 0x00, 0x00 } +// // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. +// +// E0 ^ A1 = 41. +// // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. +// +// E0 ^ A2 = 42. +// // Note that because of the xor-ing, the byte sequence stored in the entry is // not valid UTF-8. 
var inverseData = [150][4]byte{ diff --git a/vendor/golang.org/x/text/width/tables13.0.0.go b/vendor/golang.org/x/text/width/tables13.0.0.go index bac3f1aee3413..ab258e3848c58 100644 --- a/vendor/golang.org/x/text/width/tables13.0.0.go +++ b/vendor/golang.org/x/text/width/tables13.0.0.go @@ -1179,21 +1179,31 @@ var widthIndex = [1408]uint8{ } // inverseData contains 4-byte entries of the following format: -// <0 padding> +// +// <0 padding> +// // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // UTF-8 encoding of the original rune. Mappings often have the following // pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// // By xor-ing the last byte the same entry can be shared by many mappings. This // reduces the total number of distinct entries by about two thirds. // The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } +// +// { 0x01, 0xE0, 0x00, 0x00 } +// // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. +// +// E0 ^ A1 = 41. +// // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. +// +// E0 ^ A2 = 42. +// // Note that because of the xor-ing, the byte sequence stored in the entry is // not valid UTF-8. var inverseData = [150][4]byte{ diff --git a/vendor/golang.org/x/text/width/tables9.0.0.go b/vendor/golang.org/x/text/width/tables9.0.0.go index b3db84f6f9b64..6781f3d960bd3 100644 --- a/vendor/golang.org/x/text/width/tables9.0.0.go +++ b/vendor/golang.org/x/text/width/tables9.0.0.go @@ -1114,21 +1114,31 @@ var widthIndex = [1408]uint8{ } // inverseData contains 4-byte entries of the following format: -// <0 padding> +// +// <0 padding> +// // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // UTF-8 encoding of the original rune. Mappings often have the following // pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// // By xor-ing the last byte the same entry can be shared by many mappings. This // reduces the total number of distinct entries by about two thirds. // The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } +// +// { 0x01, 0xE0, 0x00, 0x00 } +// // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. +// +// E0 ^ A1 = 41. +// // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. +// +// E0 ^ A2 = 42. +// // Note that because of the xor-ing, the byte sequence stored in the entry is // not valid UTF-8. var inverseData = [150][4]byte{ diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS deleted file mode 100644 index 15167cd746c56..0000000000000 --- a/vendor/golang.org/x/time/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS deleted file mode 100644 index 1c4577e968061..0000000000000 --- a/vendor/golang.org/x/time/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. 
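Aside (not part of the vendored diff): the reflowed inverseData doc comments above, in vendor/golang.org/x/text/width/tables*.go, describe how one 4-byte table entry is shared by many fullwidth-to-ASCII mappings by xor-ing its second byte with the last byte of the wide rune's UTF-8 encoding. The standalone Go sketch below simply reproduces the worked example from those comments (entry {0x01, 0xE0, 0x00, 0x00} mapping U+FF21 and U+FF22); the program structure is illustrative and is not code from the x/text package.

package main

import "fmt"

func main() {
	// Entry value taken from the doc comment: {0x01, 0xE0, 0x00, 0x00}.
	// Per the comment's worked example, entry[1] is xor-ed with the last
	// byte of the wide rune's UTF-8 encoding to recover the narrow form.
	entry := [4]byte{0x01, 0xE0, 0x00, 0x00}

	for _, wide := range []rune{'\uFF21', '\uFF22'} { // fullwidth 'A' and 'B'
		utf8Bytes := []byte(string(wide))                // U+FF21 encodes as [0xEF 0xBC 0xA1]
		narrow := entry[1] ^ utf8Bytes[len(utf8Bytes)-1] // 0xE0 ^ 0xA1 = 0x41 ('A'), 0xE0 ^ 0xA2 = 0x42 ('B')
		fmt.Printf("U+%04X -> %q\n", wide, rune(narrow))
	}
}

Because only the final UTF-8 byte differs across a run of consecutive fullwidth letters, a single entry covers the whole run, which is what the comment means by the xor trick reducing the number of distinct entries by about two thirds.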
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index b0b982e9c6e6a..8f7c29f156aa7 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -80,6 +80,19 @@ func (lim *Limiter) Burst() int { return lim.burst } +// TokensAt returns the number of tokens available at time t. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, _, tokens := lim.advance(t) // does not mutute lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. +func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + // NewLimiter returns a new Limiter that allows events up to rate r and permits // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { @@ -89,16 +102,16 @@ func NewLimiter(r Limit, b int) *Limiter { } } -// Allow is shorthand for AllowN(time.Now(), 1). +// Allow reports whether an event may happen now. func (lim *Limiter) Allow() bool { return lim.AllowN(time.Now(), 1) } -// AllowN reports whether n events may happen at time now. +// AllowN reports whether n events may happen at time t. // Use this method if you intend to drop / skip events that exceed the rate limit. // Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(now time.Time, n int) bool { - return lim.reserveN(now, n, 0).ok +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok } // A Reservation holds information about events that are permitted by a Limiter to happen after a delay. @@ -125,17 +138,17 @@ func (r *Reservation) Delay() time.Duration { } // InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(1<<63 - 1) +const InfDuration = time.Duration(math.MaxInt64) // DelayFrom returns the duration for which the reservation holder must wait // before taking the reserved action. Zero duration means act immediately. // InfDuration means the limiter cannot grant the tokens requested in this // Reservation within the maximum wait time. -func (r *Reservation) DelayFrom(now time.Time) time.Duration { +func (r *Reservation) DelayFrom(t time.Time) time.Duration { if !r.ok { return InfDuration } - delay := r.timeToAct.Sub(now) + delay := r.timeToAct.Sub(t) if delay < 0 { return 0 } @@ -150,7 +163,7 @@ func (r *Reservation) Cancel() { // CancelAt indicates that the reservation holder will not perform the reserved action // and reverses the effects of this Reservation on the rate limit as much as possible, // considering that other reservations may have already been made. 
-func (r *Reservation) CancelAt(now time.Time) { +func (r *Reservation) CancelAt(t time.Time) { if !r.ok { return } @@ -158,7 +171,7 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.mu.Lock() defer r.lim.mu.Unlock() - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { return } @@ -170,18 +183,18 @@ func (r *Reservation) CancelAt(now time.Time) { return } // advance time to now - now, _, tokens := r.lim.advance(now) + t, _, tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { tokens = burst } // update state - r.lim.last = now + r.lim.last = t r.lim.tokens = tokens if r.timeToAct == r.lim.lastEvent { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { + if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent } } @@ -196,18 +209,20 @@ func (lim *Limiter) Reserve() *Reservation { // The Limiter takes this Reservation into account when allowing future events. // The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. // Usage example: -// r := lim.ReserveN(time.Now(), 1) -// if !r.OK() { -// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? -// return -// } -// time.Sleep(r.Delay()) -// Act() +// +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// // Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. // If you need to respect a deadline or cancel the delay, use Wait instead. // To drop or skip events exceeding rate limit, use Allow instead. -func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) return &r } @@ -221,6 +236,18 @@ func (lim *Limiter) Wait(ctx context.Context) (err error) { // canceled, or the expected wait time exceeds the Context's Deadline. // The burst limit is ignored if the rate limit is Inf. func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. 
+func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { lim.mu.Lock() burst := lim.burst limit := lim.limit @@ -236,25 +263,25 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { default: } // Determine wait limit - now := time.Now() waitLimit := InfDuration if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) + waitLimit = deadline.Sub(t) } // Reserve - r := lim.reserveN(now, n, waitLimit) + r := lim.reserveN(t, n, waitLimit) if !r.ok { return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) } // Wait if necessary - delay := r.DelayFrom(now) + delay := r.DelayFrom(t) if delay == 0 { return nil } - t := time.NewTimer(delay) - defer t.Stop() + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing select { - case <-t.C: + case <-ch: // We can proceed. return nil case <-ctx.Done(): @@ -273,13 +300,13 @@ func (lim *Limiter) SetLimit(newLimit Limit) { // SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated // or underutilized by those which reserved (using Reserve or Wait) but did not yet act // before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, _, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.limit = newLimit } @@ -290,13 +317,13 @@ func (lim *Limiter) SetBurst(newBurst int) { } // SetBurstAt sets a new burst size for the limiter. -func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, _, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.burst = newBurst } @@ -304,7 +331,7 @@ func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. -func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { lim.mu.Lock() defer lim.mu.Unlock() @@ -313,7 +340,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: true, lim: lim, tokens: n, - timeToAct: now, + timeToAct: t, } } else if lim.limit == 0 { var ok bool @@ -325,11 +352,11 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: ok, lim: lim, tokens: lim.burst, - timeToAct: now, + timeToAct: t, } } - now, last, tokens := lim.advance(now) + t, last, tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. 
tokens -= float64(n) @@ -351,12 +378,12 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio } if ok { r.tokens = n - r.timeToAct = now.Add(waitDuration) + r.timeToAct = t.Add(waitDuration) } // Update state if ok { - lim.last = now + lim.last = t lim.tokens = tokens lim.lastEvent = r.timeToAct } else { @@ -369,20 +396,20 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newT time.Time, newLast time.Time, newTokens float64) { last := lim.last - if now.Before(last) { - last = now + if t.Before(last) { + last = t } // Calculate the new number of tokens, due to time that passed. - elapsed := now.Sub(last) + elapsed := t.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens + return t, last, tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS deleted file mode 100644 index 15167cd746c56..0000000000000 --- a/vendor/golang.org/x/tools/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS deleted file mode 100644 index 1c4577e968061..0000000000000 --- a/vendor/golang.org/x/tools/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index cec819d641060..2ed25a750248e 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -17,32 +17,47 @@ // developer tools, which will then be able to consume both Go 1.7 and // Go 1.8 export data files, so they will work before and after the // Go update. (See discussion at https://golang.org/issue/15651.) -// package gcexportdata // import "golang.org/x/tools/go/gcexportdata" import ( "bufio" "bytes" + "encoding/json" "fmt" "go/token" "go/types" "io" "io/ioutil" + "os/exec" "golang.org/x/tools/go/internal/gcimporter" ) // Find returns the name of an object (.o) or archive (.a) file // containing type information for the specified import path, -// using the workspace layout conventions of go/build. +// using the go command. // If no file was found, an empty filename is returned. // // A relative srcDir is interpreted relative to the current working directory. // // Find also returns the package's resolved (canonical) import path, // reflecting the effects of srcDir and vendoring on importPath. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. 
func Find(importPath, srcDir string) (filename, path string) { - return gcimporter.FindPkg(importPath, srcDir) + cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) + cmd.Dir = srcDir + out, err := cmd.CombinedOutput() + if err != nil { + return "", "" + } + var data struct { + ImportPath string + Export string + } + json.Unmarshal(out, &data) + return data.Export, data.ImportPath } // NewReader returns a reader for the export data section of an object @@ -101,13 +116,29 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // The indexed export format starts with an 'i'; the older // binary export format starts with a 'c', 'd', or 'v' // (from "version"). Select appropriate importer. - if len(data) > 0 && data[0] == 'i' { - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err - } + if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + + case 'v', 'c', 'd': + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) - return pkg, err + case 'u': + _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) + } + } + return nil, fmt.Errorf("empty export data for %s", path) } // Write writes encoded type information for the specified package to out. diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go index efe221e7e1423..37a7247e26867 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/importer.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -23,6 +23,8 @@ import ( // or to control the FileSet or access the imports map populated during // package loading. // +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { return importer{fset, imports} } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go index 0a3cdb9a3b81c..196cb3f9b41a3 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -35,16 +35,18 @@ import ( const debugFormat = false // default: false // Current export format version. Increase with each format change. +// // Note: The latest binary (non-indexed) export format is at version 6. -// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. -// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE -// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMEMTED HERE -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding +// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. 
+// +// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE +// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE +// 4: type name objects support type aliases, uses aliasTag +// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) +// 2: removed unused bool in ODCL export (compiler only) +// 1: header format change (more regular), export package for _ struct fields +// 0: Go1.7 encoding const exportVersion = 4 // trackAllTypes enables cycle tracking for all types, not just named diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go index 3ab66830d747c..e96c39600d16b 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -45,7 +45,6 @@ var pkgExts = [...]string{".a", ".o"} // the build.Default build.Context). A relative srcDir is interpreted // relative to the current working directory. // If no file was found, an empty filename is returned. -// func FindPkg(path, srcDir string) (filename, id string) { if path == "" { return @@ -109,7 +108,6 @@ func FindPkg(path, srcDir string) (filename, id string) { // If packages[id] contains the completely imported package, that package // can be used directly, and there is no need to call this function (but // there is also no harm but for extra time used). -// func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { // support for parser error handling defer func() { @@ -133,7 +131,6 @@ func ImportData(packages map[string]*types.Package, filename, id string, data io // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. -// func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser var filename, id string @@ -184,8 +181,9 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func defer rc.Close() var hdr string + var size int64 buf := bufio.NewReader(rc) - if hdr, _, err = FindExportData(buf); err != nil { + if hdr, size, err = FindExportData(buf); err != nil { return } @@ -213,10 +211,27 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // The indexed export format starts with an 'i'; the older // binary export format starts with a 'c', 'd', or 'v' // (from "version"). Select appropriate importer. 
- if len(data) > 0 && data[0] == 'i' { - _, pkg, err = IImportData(fset, packages, data[1:], id) - } else { - _, pkg, err = BImportData(fset, packages, data, id) + if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'v', 'c', 'd': + _, pkg, err := BImportData(fset, packages, data, id) + return pkg, err + + case 'u': + _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) + } } default: @@ -348,8 +363,9 @@ func (p *parser) expectKeyword(keyword string) { // ---------------------------------------------------------------------------- // Qualified and unqualified names -// PackageId = string_lit . +// parsePackageID parses a PackageId: // +// PackageId = string_lit . func (p *parser) parsePackageID() string { id, err := strconv.Unquote(p.expect(scanner.String)) if err != nil { @@ -363,13 +379,16 @@ func (p *parser) parsePackageID() string { return id } -// PackageName = ident . +// parsePackageName parse a PackageName: // +// PackageName = ident . func (p *parser) parsePackageName() string { return p.expect(scanner.Ident) } -// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . +// parseDotIdent parses a dotIdentifier: +// +// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . func (p *parser) parseDotIdent() string { ident := "" if p.tok != scanner.Int { @@ -386,8 +405,9 @@ func (p *parser) parseDotIdent() string { return ident } -// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . +// parseQualifiedName parses a QualifiedName: // +// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . func (p *parser) parseQualifiedName() (id, name string) { p.expect('@') id = p.parsePackageID() @@ -410,7 +430,6 @@ func (p *parser) parseQualifiedName() (id, name string) { // id identifies a package, usually by a canonical package path like // "encoding/json" but possibly by a non-canonical import path like // "./json". -// func (p *parser) getPkg(id, name string) *types.Package { // package unsafe is not in the packages maps - handle explicitly if id == "unsafe" { @@ -446,7 +465,6 @@ func (p *parser) getPkg(id, name string) *types.Package { // parseExportedName is like parseQualifiedName, but // the package id is resolved to an imported *types.Package. -// func (p *parser) parseExportedName() (pkg *types.Package, name string) { id, name := p.parseQualifiedName() pkg = p.getPkg(id, "") @@ -456,8 +474,9 @@ func (p *parser) parseExportedName() (pkg *types.Package, name string) { // ---------------------------------------------------------------------------- // Types -// BasicType = identifier . +// parseBasicType parses a BasicType: // +// BasicType = identifier . func (p *parser) parseBasicType() types.Type { id := p.expect(scanner.Ident) obj := types.Universe.Lookup(id) @@ -468,8 +487,9 @@ func (p *parser) parseBasicType() types.Type { return nil } -// ArrayType = "[" int_lit "]" Type . +// parseArrayType parses an ArrayType: // +// ArrayType = "[" int_lit "]" Type . func (p *parser) parseArrayType(parent *types.Package) types.Type { // "[" already consumed and lookahead known not to be "]" lit := p.expect(scanner.Int) @@ -482,8 +502,9 @@ func (p *parser) parseArrayType(parent *types.Package) types.Type { return types.NewArray(elem, n) } -// MapType = "map" "[" Type "]" Type . 
+// parseMapType parses a MapType: // +// MapType = "map" "[" Type "]" Type . func (p *parser) parseMapType(parent *types.Package) types.Type { p.expectKeyword("map") p.expect('[') @@ -493,7 +514,9 @@ func (p *parser) parseMapType(parent *types.Package) types.Type { return types.NewMap(key, elem) } -// Name = identifier | "?" | QualifiedName . +// parseName parses a Name: +// +// Name = identifier | "?" | QualifiedName . // // For unqualified and anonymous names, the returned package is the parent // package unless parent == nil, in which case the returned package is the @@ -505,7 +528,6 @@ func (p *parser) parseMapType(parent *types.Package) types.Type { // it doesn't exist yet) unless materializePkg is set (which creates an // unnamed package with valid package path). In the latter case, a // subsequent import clause is expected to provide a name for the package. -// func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { pkg = parent if pkg == nil { @@ -539,8 +561,9 @@ func deref(typ types.Type) types.Type { return typ } -// Field = Name Type [ string_lit ] . +// parseField parses a Field: // +// Field = Name Type [ string_lit ] . func (p *parser) parseField(parent *types.Package) (*types.Var, string) { pkg, name := p.parseName(parent, true) @@ -583,9 +606,10 @@ func (p *parser) parseField(parent *types.Package) (*types.Var, string) { return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag } -// StructType = "struct" "{" [ FieldList ] "}" . -// FieldList = Field { ";" Field } . +// parseStructType parses a StructType: // +// StructType = "struct" "{" [ FieldList ] "}" . +// FieldList = Field { ";" Field } . func (p *parser) parseStructType(parent *types.Package) types.Type { var fields []*types.Var var tags []string @@ -610,8 +634,9 @@ func (p *parser) parseStructType(parent *types.Package) types.Type { return types.NewStruct(fields, tags) } -// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . +// parseParameter parses a Parameter: // +// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { _, name := p.parseName(nil, false) // remove gc-specific parameter numbering @@ -635,9 +660,10 @@ func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { return } -// Parameters = "(" [ ParameterList ] ")" . -// ParameterList = { Parameter "," } Parameter . +// parseParameters parses a Parameters: // +// Parameters = "(" [ ParameterList ] ")" . +// ParameterList = { Parameter "," } Parameter . func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { p.expect('(') for p.tok != ')' && p.tok != scanner.EOF { @@ -658,9 +684,10 @@ func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { return } -// Signature = Parameters [ Result ] . -// Result = Type | Parameters . +// parseSignature parses a Signature: // +// Signature = Parameters [ Result ] . +// Result = Type | Parameters . func (p *parser) parseSignature(recv *types.Var) *types.Signature { params, isVariadic := p.parseParameters() @@ -677,14 +704,15 @@ func (p *parser) parseSignature(recv *types.Var) *types.Signature { return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) } -// InterfaceType = "interface" "{" [ MethodList ] "}" . -// MethodList = Method { ";" Method } . -// Method = Name Signature . 
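The parse* functions above ultimately just feed the public go/types constructors. The short sketch below (illustrative only; the package path, field names, and tags are invented) builds the same kinds of objects that parseStructType and parseSignature return:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/demo", "demo")

	// What parseStructType produces: a field list plus a parallel tag list.
	fields := []*types.Var{
		types.NewField(token.NoPos, pkg, "Name", types.Typ[types.String], false),
		types.NewField(token.NoPos, pkg, "Age", types.Typ[types.Int], false),
	}
	tags := []string{`json:"name"`, `json:"age"`}
	st := types.NewStruct(fields, tags)
	fmt.Println(st)

	// What parseSignature produces: parameter and result tuples.
	params := types.NewTuple(types.NewVar(token.NoPos, pkg, "s", st))
	results := types.NewTuple(types.NewVar(token.NoPos, pkg, "", types.Typ[types.Bool]))
	fmt.Println(types.NewSignature(nil, params, results, false))
}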
+// parseInterfaceType parses an InterfaceType: +// +// InterfaceType = "interface" "{" [ MethodList ] "}" . +// MethodList = Method { ";" Method } . +// Method = Name Signature . // // The methods of embedded interfaces are always "inlined" // by the compiler and thus embedded interfaces are never // visible in the export data. -// func (p *parser) parseInterfaceType(parent *types.Package) types.Type { var methods []*types.Func @@ -705,8 +733,9 @@ func (p *parser) parseInterfaceType(parent *types.Package) types.Type { return newInterface(methods, nil).Complete() } -// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . +// parseChanType parses a ChanType: // +// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . func (p *parser) parseChanType(parent *types.Package) types.Type { dir := types.SendRecv if p.tok == scanner.Ident { @@ -724,17 +753,18 @@ func (p *parser) parseChanType(parent *types.Package) types.Type { return types.NewChan(dir, elem) } -// Type = -// BasicType | TypeName | ArrayType | SliceType | StructType | -// PointerType | FuncType | InterfaceType | MapType | ChanType | -// "(" Type ")" . +// parseType parses a Type: // -// BasicType = ident . -// TypeName = ExportedName . -// SliceType = "[" "]" Type . -// PointerType = "*" Type . -// FuncType = "func" Signature . +// Type = +// BasicType | TypeName | ArrayType | SliceType | StructType | +// PointerType | FuncType | InterfaceType | MapType | ChanType | +// "(" Type ")" . // +// BasicType = ident . +// TypeName = ExportedName . +// SliceType = "[" "]" Type . +// PointerType = "*" Type . +// FuncType = "func" Signature . func (p *parser) parseType(parent *types.Package) types.Type { switch p.tok { case scanner.Ident: @@ -786,16 +816,18 @@ func (p *parser) parseType(parent *types.Package) types.Type { // ---------------------------------------------------------------------------- // Declarations -// ImportDecl = "import" PackageName PackageId . +// parseImportDecl parses an ImportDecl: // +// ImportDecl = "import" PackageName PackageId . func (p *parser) parseImportDecl() { p.expectKeyword("import") name := p.parsePackageName() p.getPkg(p.parsePackageID(), name) } -// int_lit = [ "+" | "-" ] { "0" ... "9" } . +// parseInt parses an int_lit: // +// int_lit = [ "+" | "-" ] { "0" ... "9" } . func (p *parser) parseInt() string { s := "" switch p.tok { @@ -808,8 +840,9 @@ func (p *parser) parseInt() string { return s + p.expect(scanner.Int) } -// number = int_lit [ "p" int_lit ] . +// parseNumber parses a number: // +// number = int_lit [ "p" int_lit ] . func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { // mantissa mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) @@ -844,13 +877,14 @@ func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { return } -// ConstDecl = "const" ExportedName [ Type ] "=" Literal . -// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . -// bool_lit = "true" | "false" . -// complex_lit = "(" float_lit "+" float_lit "i" ")" . -// rune_lit = "(" int_lit "+" int_lit ")" . -// string_lit = `"` { unicode_char } `"` . +// parseConstDecl parses a ConstDecl: // +// ConstDecl = "const" ExportedName [ Type ] "=" Literal . +// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . +// bool_lit = "true" | "false" . +// complex_lit = "(" float_lit "+" float_lit "i" ")" . +// rune_lit = "(" int_lit "+" int_lit ")" . +// string_lit = `"` { unicode_char } `"` . 
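The number production above stores a float as a decimal mantissa with an optional binary exponent ("p" int_lit), i.e. mant·2^exp, which parseNumber rebuilds with go/constant arithmetic. A hedged sketch of the same reconstruction, assuming the mantissa and exponent have already been split out of the token stream:

package main

import (
	"fmt"
	"go/constant"
	"go/token"
)

// decodeNumber computes mant * 2**exp the way parseNumber does: a left shift
// for positive exponents, division by 2**(-exp) otherwise.
func decodeNumber(mant string, exp int64) constant.Value {
	x := constant.MakeFromLiteral(mant, token.INT, 0)
	switch {
	case exp > 0:
		x = constant.Shift(x, token.SHL, uint(exp))
	case exp < 0:
		d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
		x = constant.BinaryOp(constant.ToFloat(x), token.QUO, d)
	}
	return x
}

func main() {
	fmt.Println(decodeNumber("3", 2))  // 12
	fmt.Println(decodeNumber("3", -1)) // 1.5
}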
func (p *parser) parseConstDecl() { p.expectKeyword("const") pkg, name := p.parseExportedName() @@ -920,8 +954,9 @@ func (p *parser) parseConstDecl() { pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) } -// TypeDecl = "type" ExportedName Type . +// parseTypeDecl parses a TypeDecl: // +// TypeDecl = "type" ExportedName Type . func (p *parser) parseTypeDecl() { p.expectKeyword("type") pkg, name := p.parseExportedName() @@ -939,8 +974,9 @@ func (p *parser) parseTypeDecl() { } } -// VarDecl = "var" ExportedName Type . +// parseVarDecl parses a VarDecl: // +// VarDecl = "var" ExportedName Type . func (p *parser) parseVarDecl() { p.expectKeyword("var") pkg, name := p.parseExportedName() @@ -948,9 +984,10 @@ func (p *parser) parseVarDecl() { pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) } -// Func = Signature [ Body ] . -// Body = "{" ... "}" . +// parseFunc parses a Func: // +// Func = Signature [ Body ] . +// Body = "{" ... "}" . func (p *parser) parseFunc(recv *types.Var) *types.Signature { sig := p.parseSignature(recv) if p.tok == '{' { @@ -967,9 +1004,10 @@ func (p *parser) parseFunc(recv *types.Var) *types.Signature { return sig } -// MethodDecl = "func" Receiver Name Func . -// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . +// parseMethodDecl parses a MethodDecl: // +// MethodDecl = "func" Receiver Name Func . +// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . func (p *parser) parseMethodDecl() { // "func" already consumed p.expect('(') @@ -992,8 +1030,9 @@ func (p *parser) parseMethodDecl() { base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) } -// FuncDecl = "func" ExportedName Func . +// parseFuncDecl parses a FuncDecl: // +// FuncDecl = "func" ExportedName Func . func (p *parser) parseFuncDecl() { // "func" already consumed pkg, name := p.parseExportedName() @@ -1001,8 +1040,9 @@ func (p *parser) parseFuncDecl() { pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ)) } -// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . +// parseDecl parses a Decl: // +// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . func (p *parser) parseDecl() { if p.tok == scanner.Ident { switch p.lit { @@ -1029,9 +1069,10 @@ func (p *parser) parseDecl() { // ---------------------------------------------------------------------------- // Export -// Export = "PackageClause { Decl } "$$" . -// PackageClause = "package" PackageName [ "safe" ] "\n" . +// parseExport parses an Export: // +// Export = "PackageClause { Decl } "$$" . +// PackageClause = "package" PackageName [ "safe" ] "\n" . func (p *parser) parseExport() *types.Package { p.expectKeyword("package") name := p.parsePackageName() diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go index 209553409cb5f..9a4ff329e1280 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go @@ -251,7 +251,10 @@ func (p *iexporter) stringOff(s string) uint64 { // pushDecl adds n to the declaration work queue, if not already present. func (p *iexporter) pushDecl(obj types.Object) { // Package unsafe is known to the compiler and predeclared. - assert(obj.Pkg() != types.Unsafe) + // Caller should not ask us to do export it. 
+ if obj.Pkg() == types.Unsafe { + panic("cannot export package unsafe") + } if _, ok := p.declIndex[obj]; ok { return diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go index 84cfb807d739e..4caa0f55d9de2 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -17,6 +17,7 @@ import ( "go/token" "go/types" "io" + "math/big" "sort" "strings" @@ -53,7 +54,7 @@ const ( ) type ident struct { - pkg string + pkg *types.Package name string } @@ -100,7 +101,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data if !debug { defer func() { if e := recover(); e != nil { - if version > currentVersion { + if bundle { + err = fmt.Errorf("%v", e) + } else if version > currentVersion { err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) } else { err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) @@ -461,7 +464,7 @@ func (r *importReader) obj(name string) { // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. - id := ident{r.currPkg.Name(), name} + id := ident{r.currPkg, name} r.p.tparamIndex[id] = t var implicit bool if r.p.version >= iexportVersionGo1_18 { @@ -510,7 +513,9 @@ func (r *importReader) value() (typ types.Type, val constant.Value) { val = constant.MakeString(r.string()) case types.IsInteger: - val = r.mpint(b) + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) case types.IsFloat: val = r.mpfloat(b) @@ -559,8 +564,8 @@ func intSize(b *types.Basic) (signed bool, maxBytes uint) { return } -func (r *importReader) mpint(b *types.Basic) constant.Value { - signed, maxBytes := intSize(b) +func (r *importReader) mpint(x *big.Int, typ *types.Basic) { + signed, maxBytes := intSize(typ) maxSmall := 256 - maxBytes if signed { @@ -579,7 +584,8 @@ func (r *importReader) mpint(b *types.Basic) constant.Value { v = ^v } } - return constant.MakeInt64(v) + x.SetInt64(v) + return } v := -n @@ -589,47 +595,23 @@ func (r *importReader) mpint(b *types.Basic) constant.Value { if v < 1 || uint(v) > maxBytes { errorf("weird decoding: %v, %v => %v", n, signed, v) } - - buf := make([]byte, v) - io.ReadFull(&r.declReader, buf) - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { - buf[i], buf[j] = buf[j], buf[i] - } - - x := constant.MakeFromBytes(buf) + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) if signed && n&1 != 0 { - x = constant.UnaryOp(token.SUB, x, 0) + x.Neg(x) } - return x } -func (r *importReader) mpfloat(b *types.Basic) constant.Value { - x := r.mpint(b) - if constant.Sign(x) == 0 { - return x - } - - exp := r.int64() - switch { - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - // Ensure that the imported Kind is Float, else this constant may run into - // bitsize limits on overlarge integers. Eventually we can instead adopt - // the approach of CL 288632, but that CL relies on go/constant APIs that - // were introduced in go1.13. - // - // TODO(rFindley): sync the logic here with tip Go once we no longer - // support go1.12. 
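The mpint rewrite above swaps go/constant byte handling for math/big: the encoded bytes are big-endian, so big.Int.SetBytes consumes them directly, while the removed code had to reverse them for constant.MakeFromBytes, which expects little-endian. A tiny sketch of that equivalence with an invented two-byte payload:

package main

import (
	"fmt"
	"go/constant"
	"math/big"
)

func main() {
	wire := []byte{0x01, 0x02} // hypothetical payload for the integer 258

	// New path: big.Int reads the big-endian bytes as-is.
	x := new(big.Int).SetBytes(wire)

	// Old path: reverse to little-endian for constant.MakeFromBytes.
	y := constant.MakeFromBytes([]byte{wire[1], wire[0]})

	fmt.Println(x, y) // both print 258
}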
- x = constant.ToFloat(x) - case exp < 0: - d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) +func (r *importReader) mpfloat(typ *types.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) } - return x + return constant.Make(&f) } func (r *importReader) ident() string { @@ -777,7 +759,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { errorf("unexpected type param type") } pkg, name := r.qualifiedIdent() - id := ident{pkg.Name(), name} + id := ident{pkg, name} if t, ok := r.p.tparamIndex[id]; ok { // We're already in the process of importing this typeparam. return t diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go new file mode 100644 index 0000000000000..286bf445483d8 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(go1.18 && goexperiment.unified) +// +build !go1.18 !goexperiment.unified + +package gcimporter + +const unifiedIR = false diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go new file mode 100644 index 0000000000000..b5d69ffbe682d --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 && goexperiment.unified +// +build go1.18,goexperiment.unified + +package gcimporter + +const unifiedIR = true diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go b/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go new file mode 100644 index 0000000000000..8eb20729c2ad5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" +) + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") + return +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go new file mode 100644 index 0000000000000..3c1a4375435a7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go @@ -0,0 +1,612 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Derived from go/internal/gcimporter/ureader.go + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import ( + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/internal/pkgbits" +) + +// A pkgReader holds the shared state for reading a unified IR package +// description. 
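The mpfloat replacement in iimport.go above rebuilds a float constant as mantissa·2^exp using big.Float.SetMantExp instead of repeated go/constant shifts. A minimal sketch of that reconstruction with invented values:

package main

import (
	"fmt"
	"go/constant"
	"math/big"
)

func main() {
	mant := big.NewInt(3) // as mpint would decode it
	exp := -1             // as int64() would decode it

	var f big.Float
	f.SetInt(mant)
	if f.Sign() != 0 {
		f.SetMantExp(&f, exp) // f = f * 2**exp
	}
	fmt.Println(&f, constant.Make(&f)) // 1.5, both as a big.Float and as a constant.Value
}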
+type pkgReader struct { + pkgbits.PkgDecoder + + fake fakeFileSet + + ctxt *types.Context + imports map[string]*types.Package // previously imported packages, indexed by path + + // lazily initialized arrays corresponding to the unified IR + // PosBase, Pkg, and Type sections, respectively. + posBases []string // position bases (i.e., file names) + pkgs []*types.Package + typs []types.Type + + // laterFns holds functions that need to be invoked at the end of + // import reading. + laterFns []func() +} + +// later adds a function to be invoked at the end of import reading. +func (pr *pkgReader) later(fn func()) { + pr.laterFns = append(pr.laterFns, fn) +} + +// See cmd/compile/internal/noder.derivedInfo. +type derivedInfo struct { + idx pkgbits.Index + needed bool +} + +// See cmd/compile/internal/noder.typeInfo. +type typeInfo struct { + idx pkgbits.Index + derived bool +} + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + s := string(data) + s = s[:strings.LastIndex(s, "\n$$\n")] + input := pkgbits.NewPkgDecoder(path, s) + pkg = readUnifiedPackage(fset, nil, imports, input) + return +} + +// readUnifiedPackage reads a package description from the given +// unified IR export data decoder. +func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { + pr := pkgReader{ + PkgDecoder: input, + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + + ctxt: ctxt, + imports: imports, + + posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), + pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), + typs: make([]types.Type, input.NumElems(pkgbits.RelocType)), + } + defer pr.fake.setLines() + + r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) + pkg := r.pkg() + r.Bool() // has init + + for i, n := 0, r.Len(); i < n; i++ { + // As if r.obj(), but avoiding the Scope.Lookup call, + // to avoid eager loading of imports. + r.Sync(pkgbits.SyncObject) + assert(!r.Bool()) + r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + assert(r.Len() == 0) + } + + r.Sync(pkgbits.SyncEOF) + + for _, fn := range pr.laterFns { + fn() + } + + pkg.MarkComplete() + return pkg +} + +// A reader holds the state for reading a single unified IR element +// within a package. +type reader struct { + pkgbits.Decoder + + p *pkgReader + + dict *readerDict +} + +// A readerDict holds the state for type parameters that parameterize +// the current unified IR element. +type readerDict struct { + // bounds is a slice of typeInfos corresponding to the underlying + // bounds of the element's type parameters. + bounds []typeInfo + + // tparams is a slice of the constructed TypeParams for the element. + tparams []*types.TypeParam + + // devived is a slice of types derived from tparams, which may be + // instantiated while reading the current element. + derived []derivedInfo + derivedTypes []types.Type // lazily instantiated from derived +} + +func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.NewDecoder(k, idx, marker), + p: pr, + } +} + +// @@@ Positions + +func (r *reader) pos() token.Pos { + r.Sync(pkgbits.SyncPos) + if !r.Bool() { + return token.NoPos + } + + // TODO(mdempsky): Delta encoding. 
+ posBase := r.posBase() + line := r.Uint() + col := r.Uint() + return r.p.fake.pos(posBase, int(line), int(col)) +} + +func (r *reader) posBase() string { + return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) +} + +func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { + if b := pr.posBases[idx]; b != "" { + return b + } + + r := pr.newReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + + // Within types2, position bases have a lot more details (e.g., + // keeping track of where //line directives appeared exactly). + // + // For go/types, we just track the file name. + + filename := r.String() + + if r.Bool() { // file base + // Was: "b = token.NewTrimmedFileBase(filename, true)" + } else { // line base + pos := r.pos() + line := r.Uint() + col := r.Uint() + + // Was: "b = token.NewLineBase(pos, filename, true, line, col)" + _, _, _ = pos, line, col + } + + b := filename + pr.posBases[idx] = b + return b +} + +// @@@ Packages + +func (r *reader) pkg() *types.Package { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { + // TODO(mdempsky): Consider using some non-nil pointer to indicate + // the universe scope, so we don't need to keep re-reading it. + if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +func (r *reader) doPkg() *types.Package { + path := r.String() + switch path { + case "": + path = r.p.PkgPath() + case "builtin": + return nil // universe + case "unsafe": + return types.Unsafe + } + + if pkg := r.p.imports[path]; pkg != nil { + return pkg + } + + name := r.String() + + pkg := types.NewPackage(path, name) + r.p.imports[path] = pkg + + imports := make([]*types.Package, r.Len()) + for i := range imports { + imports[i] = r.pkg() + } + pkg.SetImports(imports) + + return pkg +} + +// @@@ Types + +func (r *reader) typ() types.Type { + return r.p.typIdx(r.typInfo(), r.dict) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { + idx := info.idx + var where *types.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + r := pr.newReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ := r.doTyp() + assert(typ != nil) + + // See comment in pkgReader.typIdx explaining how this happens. 
+ if prev := *where; prev != nil { + return prev + } + + *where = typ + return typ +} + +func (r *reader) doTyp() (res types.Type) { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + errorf("unhandled type tag: %v", tag) + panic("unreachable") + + case pkgbits.TypeBasic: + return types.Typ[r.Len()] + + case pkgbits.TypeNamed: + obj, targs := r.obj() + name := obj.(*types.TypeName) + if len(targs) != 0 { + t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) + return t + } + return name.Type() + + case pkgbits.TypeTypeParam: + return r.dict.tparams[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := types.ChanDir(r.Len()) + return types.NewChan(dir, r.typ()) + case pkgbits.TypeMap: + return types.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types.NewPointer(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil, nil, nil) + case pkgbits.TypeSlice: + return types.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) structType() *types.Struct { + fields := make([]*types.Var, r.Len()) + var tags []string + for i := range fields { + pos := r.pos() + pkg, name := r.selector() + ftyp := r.typ() + tag := r.String() + embedded := r.Bool() + + fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) + if tag != "" { + for len(tags) < i { + tags = append(tags, "") + } + tags = append(tags, tag) + } + } + return types.NewStruct(fields, tags) +} + +func (r *reader) unionType() *types.Union { + terms := make([]*types.Term, r.Len()) + for i := range terms { + terms[i] = types.NewTerm(r.Bool(), r.typ()) + } + return types.NewUnion(terms) +} + +func (r *reader) interfaceType() *types.Interface { + methods := make([]*types.Func, r.Len()) + embeddeds := make([]types.Type, r.Len()) + implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() + + for i := range methods { + pos := r.pos() + pkg, name := r.selector() + mtyp := r.signature(nil, nil, nil) + methods[i] = types.NewFunc(pos, pkg, name, mtyp) + } + + for i := range embeddeds { + embeddeds[i] = r.typ() + } + + iface := types.NewInterfaceType(methods, embeddeds) + if implicit { + iface.MarkImplicit() + } + return iface +} + +func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + variadic := r.Bool() + + return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) +} + +func (r *reader) params() *types.Tuple { + r.Sync(pkgbits.SyncParams) + + params := make([]*types.Var, r.Len()) + for i := range params { + params[i] = r.param() + } + + return types.NewTuple(params...) 
+} + +func (r *reader) param() *types.Var { + r.Sync(pkgbits.SyncParam) + + pos := r.pos() + pkg, name := r.localIdent() + typ := r.typ() + + return types.NewParam(pos, pkg, name, typ) +} + +// @@@ Objects + +func (r *reader) obj() (types.Object, []types.Type) { + r.Sync(pkgbits.SyncObject) + + assert(!r.Bool()) + + pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + obj := pkgScope(pkg).Lookup(name) + + targs := make([]types.Type, r.Len()) + for i := range targs { + targs[i] = r.typ() + } + + return obj, targs +} + +func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { + rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) + + objPkg, objName := rname.qualifiedIdent() + assert(objName != "") + + tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + + if tag == pkgbits.ObjStub { + assert(objPkg == nil || objPkg == types.Unsafe) + return objPkg, objName + } + + if objPkg.Scope().Lookup(objName) == nil { + dict := pr.objDictIdx(idx) + + r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) + r.dict = dict + + declare := func(obj types.Object) { + objPkg.Scope().Insert(obj) + } + + switch tag { + default: + panic("weird") + + case pkgbits.ObjAlias: + pos := r.pos() + typ := r.typ() + declare(types.NewTypeName(pos, objPkg, objName, typ)) + + case pkgbits.ObjConst: + pos := r.pos() + typ := r.typ() + val := r.Value() + declare(types.NewConst(pos, objPkg, objName, typ, val)) + + case pkgbits.ObjFunc: + pos := r.pos() + tparams := r.typeParamNames() + sig := r.signature(nil, nil, tparams) + declare(types.NewFunc(pos, objPkg, objName, sig)) + + case pkgbits.ObjType: + pos := r.pos() + + obj := types.NewTypeName(pos, objPkg, objName, nil) + named := types.NewNamed(obj, nil, nil) + declare(obj) + + named.SetTypeParams(r.typeParamNames()) + + // TODO(mdempsky): Rewrite receiver types to underlying is an + // Interface? The go/types importer does this (I think because + // unit tests expected that), but cmd/compile doesn't care + // about it, so maybe we can avoid worrying about that here. + rhs := r.typ() + r.p.later(func() { + underlying := rhs.Underlying() + named.SetUnderlying(underlying) + }) + + for i, n := 0, r.Len(); i < n; i++ { + named.AddMethod(r.method()) + } + + case pkgbits.ObjVar: + pos := r.pos() + typ := r.typ() + declare(types.NewVar(pos, objPkg, objName, typ)) + } + } + + return objPkg, objName +} + +func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { + r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) + + var dict readerDict + + if implicits := r.Len(); implicits != 0 { + errorf("unexpected object with %v implicit type parameter(s)", implicits) + } + + dict.bounds = make([]typeInfo, r.Len()) + for i := range dict.bounds { + dict.bounds[i] = r.typInfo() + } + + dict.derived = make([]derivedInfo, r.Len()) + dict.derivedTypes = make([]types.Type, len(dict.derived)) + for i := range dict.derived { + dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + } + + // function references follow, but reader doesn't need those + + return &dict +} + +func (r *reader) typeParamNames() []*types.TypeParam { + r.Sync(pkgbits.SyncTypeParamNames) + + // Note: This code assumes it only processes objects without + // implement type parameters. This is currently fine, because + // reader is only used to read in exported declarations, which are + // always package scoped. + + if len(r.dict.bounds) == 0 { + return nil + } + + // Careful: Type parameter lists may have cycles. 
To allow for this, + // we construct the type parameter list in two passes: first we + // create all the TypeNames and TypeParams, then we construct and + // set the bound type. + + r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds)) + for i := range r.dict.bounds { + pos := r.pos() + pkg, name := r.localIdent() + + tname := types.NewTypeName(pos, pkg, name, nil) + r.dict.tparams[i] = types.NewTypeParam(tname, nil) + } + + typs := make([]types.Type, len(r.dict.bounds)) + for i, bound := range r.dict.bounds { + typs[i] = r.p.typIdx(bound, r.dict) + } + + // TODO(mdempsky): This is subtle, elaborate further. + // + // We have to save tparams outside of the closure, because + // typeParamNames() can be called multiple times with the same + // dictionary instance. + // + // Also, this needs to happen later to make sure SetUnderlying has + // been called. + // + // TODO(mdempsky): Is it safe to have a single "later" slice or do + // we need to have multiple passes? See comments on CL 386002 and + // go.dev/issue/52104. + tparams := r.dict.tparams + r.p.later(func() { + for i, typ := range typs { + tparams[i].SetConstraint(typ) + } + }) + + return r.dict.tparams +} + +func (r *reader) method() *types.Func { + r.Sync(pkgbits.SyncMethod) + pos := r.pos() + pkg, name := r.selector() + + rparams := r.typeParamNames() + sig := r.signature(r.param(), rparams, nil) + + _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go. + return types.NewFunc(pos, pkg, name, sig) +} + +func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) } +func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) } +func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) } + +func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) { + r.Sync(marker) + return r.pkg(), r.String() +} + +// pkgScope returns pkg.Scope(). +// If pkg is nil, it returns types.Universe instead. +// +// TODO(mdempsky): Remove after x/tools can depend on Go 1.19. +func pkgScope(pkg *types.Package) *types.Scope { + if pkg != nil { + return pkg.Scope() + } + return types.Universe +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/codes.go b/vendor/golang.org/x/tools/go/internal/pkgbits/codes.go new file mode 100644 index 0000000000000..f0cabde96eba9 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/codes.go @@ -0,0 +1,77 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A Code is an enum value that can be encoded into bitstreams. +// +// Code types are preferable for enum types, because they allow +// Decoder to detect desyncs. +type Code interface { + // Marker returns the SyncMarker for the Code's dynamic type. + Marker() SyncMarker + + // Value returns the Code's ordinal value. + Value() int +} + +// A CodeVal distinguishes among go/constant.Value encodings. +type CodeVal int + +func (c CodeVal) Marker() SyncMarker { return SyncVal } +func (c CodeVal) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ValBool CodeVal = iota + ValString + ValInt64 + ValBigInt + ValBigRat + ValBigFloat +) + +// A CodeType distinguishes among go/types.Type encodings. 
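typeParamNames in ureader_yes.go above builds type parameter lists in two passes precisely because a bound may mention other parameters from the same list. The self-contained sketch below (the package path, parameter names, and the Get method are all invented) shows the same create-first, constrain-later shape using the public go/types API:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/demo", "demo")

	// Pass 1: create every TypeParam with a nil constraint so the bounds
	// built next can already refer to them.
	names := []string{"S", "E"}
	tparams := make([]*types.TypeParam, len(names))
	for i, name := range names {
		tn := types.NewTypeName(token.NoPos, pkg, name, nil)
		tparams[i] = types.NewTypeParam(tn, nil)
	}

	// Pass 2: build and set the bounds. S's bound has a method returning E,
	// so E's TypeParam had to exist before this point.
	results := types.NewTuple(types.NewVar(token.NoPos, pkg, "", tparams[1]))
	get := types.NewFunc(token.NoPos, pkg, "Get",
		types.NewSignatureType(nil, nil, nil, nil, results, false))
	boundS := types.NewInterfaceType([]*types.Func{get}, nil)
	boundS.Complete()
	tparams[0].SetConstraint(boundS)
	tparams[1].SetConstraint(types.Universe.Lookup("any").Type())

	for _, tp := range tparams {
		fmt.Println(tp.Obj().Name(), tp.Constraint())
	}
}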
+type CodeType int + +func (c CodeType) Marker() SyncMarker { return SyncType } +func (c CodeType) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + TypeBasic CodeType = iota + TypeNamed + TypePointer + TypeSlice + TypeArray + TypeChan + TypeMap + TypeSignature + TypeStruct + TypeInterface + TypeUnion + TypeTypeParam +) + +// A CodeObj distinguishes among go/types.Object encodings. +type CodeObj int + +func (c CodeObj) Marker() SyncMarker { return SyncCodeObj } +func (c CodeObj) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ObjAlias CodeObj = iota + ObjConst + ObjType + ObjFunc + ObjVar + ObjStub +) diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go new file mode 100644 index 0000000000000..2bc793668ec90 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go @@ -0,0 +1,433 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "math/big" + "os" + "runtime" + "strings" +) + +// A PkgDecoder provides methods for decoding a package's Unified IR +// export data. +type PkgDecoder struct { + // version is the file format version. + version uint32 + + // sync indicates whether the file uses sync markers. + sync bool + + // pkgPath is the package path for the package to be decoded. + // + // TODO(mdempsky): Remove; unneeded since CL 391014. + pkgPath string + + // elemData is the full data payload of the encoded package. + // Elements are densely and contiguously packed together. + // + // The last 8 bytes of elemData are the package fingerprint. + elemData string + + // elemEnds stores the byte-offset end positions of element + // bitstreams within elemData. + // + // For example, element I's bitstream data starts at elemEnds[I-1] + // (or 0, if I==0) and ends at elemEnds[I]. + // + // Note: elemEnds is indexed by absolute indices, not + // section-relative indices. + elemEnds []uint32 + + // elemEndsEnds stores the index-offset end positions of relocation + // sections within elemEnds. + // + // For example, section K's end positions start at elemEndsEnds[K-1] + // (or 0, if K==0) and end at elemEndsEnds[K]. + elemEndsEnds [numRelocs]uint32 +} + +// PkgPath returns the package path for the package +// +// TODO(mdempsky): Remove; unneeded since CL 391014. +func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } + +// SyncMarkers reports whether pr uses sync markers. +func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } + +// NewPkgDecoder returns a PkgDecoder initialized to read the Unified +// IR export data from input. pkgPath is the package path for the +// compilation unit that produced the export data. +// +// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. +func NewPkgDecoder(pkgPath, input string) PkgDecoder { + pr := PkgDecoder{ + pkgPath: pkgPath, + } + + // TODO(mdempsky): Implement direct indexing of input string to + // avoid copying the position information. 
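The PkgDecoder fields above describe a two-level prefix-sum layout: elemEndsEnds records, per section, where that section's entries stop inside elemEnds, and elemEnds records each element's end offset inside elemData. The standalone sketch below (offset values are invented) turns a (section, index) pair into a byte range the same way AbsIdx and DataIdx do further down:

package main

import "fmt"

// dataRange maps a (section, index) pair to the element's byte range within
// the flat elemData payload, mirroring PkgDecoder.AbsIdx/DataIdx.
func dataRange(elemEnds, elemEndsEnds []uint32, section, idx int) (start, end uint32) {
	abs := idx
	if section > 0 {
		abs += int(elemEndsEnds[section-1]) // skip elements of earlier sections
	}
	if abs > 0 {
		start = elemEnds[abs-1]
	}
	return start, elemEnds[abs]
}

func main() {
	elemEndsEnds := []uint32{2, 3} // section 0 has 2 elements, section 1 has 1
	elemEnds := []uint32{5, 9, 14} // end offsets of the three elements

	fmt.Println(dataRange(elemEnds, elemEndsEnds, 0, 1)) // 5 9
	fmt.Println(dataRange(elemEnds, elemEndsEnds, 1, 0)) // 9 14
}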
+ + r := strings.NewReader(input) + + assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + + switch pr.version { + default: + panic(fmt.Errorf("unsupported version: %v", pr.version)) + case 0: + // no flags + case 1: + var flags uint32 + assert(binary.Read(r, binary.LittleEndian, &flags) == nil) + pr.sync = flags&flagSyncMarkers != 0 + } + + assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) + + pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) + assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) + + pos, err := r.Seek(0, os.SEEK_CUR) + assert(err == nil) + + pr.elemData = input[pos:] + assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + return pr +} + +// NumElems returns the number of elements in section k. +func (pr *PkgDecoder) NumElems(k RelocKind) int { + count := int(pr.elemEndsEnds[k]) + if k > 0 { + count -= int(pr.elemEndsEnds[k-1]) + } + return count +} + +// TotalElems returns the total number of elements across all sections. +func (pr *PkgDecoder) TotalElems() int { + return len(pr.elemEnds) +} + +// Fingerprint returns the package fingerprint. +func (pr *PkgDecoder) Fingerprint() [8]byte { + var fp [8]byte + copy(fp[:], pr.elemData[len(pr.elemData)-8:]) + return fp +} + +// AbsIdx returns the absolute index for the given (section, index) +// pair. +func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { + absIdx := int(idx) + if k > 0 { + absIdx += int(pr.elemEndsEnds[k-1]) + } + if absIdx >= int(pr.elemEndsEnds[k]) { + errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + } + return absIdx +} + +// DataIdx returns the raw element bitstream for the given (section, +// index) pair. +func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { + absIdx := pr.AbsIdx(k, idx) + + var start uint32 + if absIdx > 0 { + start = pr.elemEnds[absIdx-1] + } + end := pr.elemEnds[absIdx] + + return pr.elemData[start:end] +} + +// StringIdx returns the string value for the given string index. +func (pr *PkgDecoder) StringIdx(idx Index) string { + return pr.DataIdx(RelocString, idx) +} + +// NewDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.NewDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +// NewDecoderRaw returns a Decoder for the given (section, index) pair. +// +// Most callers should use NewDecoder instead. +func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { + r := Decoder{ + common: pr, + k: k, + Idx: idx, + } + + // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. + r.Data = *strings.NewReader(pr.DataIdx(k, idx)) + + r.Sync(SyncRelocs) + r.Relocs = make([]RelocEnt, r.Len()) + for i := range r.Relocs { + r.Sync(SyncReloc) + r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} + } + + return r +} + +// A Decoder provides methods for decoding an individual element's +// bitstream data. +type Decoder struct { + common *PkgDecoder + + Relocs []RelocEnt + Data strings.Reader + + k RelocKind + Idx Index +} + +func (r *Decoder) checkErr(err error) { + if err != nil { + errorf("unexpected decoding error: %w", err) + } +} + +func (r *Decoder) rawUvarint() uint64 { + x, err := binary.ReadUvarint(&r.Data) + r.checkErr(err) + return x +} + +func (r *Decoder) rawVarint() int64 { + ux := r.rawUvarint() + + // Zig-zag decode. 
+ x := int64(ux >> 1) + if ux&1 != 0 { + x = ^x + } + return x +} + +func (r *Decoder) rawReloc(k RelocKind, idx int) Index { + e := r.Relocs[idx] + assert(e.Kind == k) + return e.Idx +} + +// Sync decodes a sync marker from the element bitstream and asserts +// that it matches the expected marker. +// +// If r.common.sync is false, then Sync is a no-op. +func (r *Decoder) Sync(mWant SyncMarker) { + if !r.common.sync { + return + } + + pos, _ := r.Data.Seek(0, os.SEEK_CUR) // TODO(mdempsky): io.SeekCurrent after #44505 is resolved + mHave := SyncMarker(r.rawUvarint()) + writerPCs := make([]int, r.rawUvarint()) + for i := range writerPCs { + writerPCs[i] = int(r.rawUvarint()) + } + + if mHave == mWant { + return + } + + // There's some tension here between printing: + // + // (1) full file paths that tools can recognize (e.g., so emacs + // hyperlinks the "file:line" text for easy navigation), or + // + // (2) short file paths that are easier for humans to read (e.g., by + // omitting redundant or irrelevant details, so it's easier to + // focus on the useful bits that remain). + // + // The current formatting favors the former, as it seems more + // helpful in practice. But perhaps the formatting could be improved + // to better address both concerns. For example, use relative file + // paths if they would be shorter, or rewrite file paths to contain + // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how + // to reliably expand that again. + + fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) + + fmt.Printf("\nfound %v, written at:\n", mHave) + if len(writerPCs) == 0 { + fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) + } + for _, pc := range writerPCs { + fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) + } + + fmt.Printf("\nexpected %v, reading at:\n", mWant) + var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? + n := runtime.Callers(2, readerPCs[:]) + for _, pc := range fmtFrames(readerPCs[:n]...) { + fmt.Printf("\t%s\n", pc) + } + + // We already printed a stack trace for the reader, so now we can + // simply exit. Printing a second one with panic or base.Fatalf + // would just be noise. + os.Exit(1) +} + +// Bool decodes and returns a bool value from the element bitstream. +func (r *Decoder) Bool() bool { + r.Sync(SyncBool) + x, err := r.Data.ReadByte() + r.checkErr(err) + assert(x < 2) + return x != 0 +} + +// Int64 decodes and returns an int64 value from the element bitstream. +func (r *Decoder) Int64() int64 { + r.Sync(SyncInt64) + return r.rawVarint() +} + +// Int64 decodes and returns a uint64 value from the element bitstream. +func (r *Decoder) Uint64() uint64 { + r.Sync(SyncUint64) + return r.rawUvarint() +} + +// Len decodes and returns a non-negative int value from the element bitstream. +func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } + +// Int decodes and returns an int value from the element bitstream. +func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } + +// Uint decodes and returns a uint value from the element bitstream. +func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } + +// Code decodes a Code value from the element bitstream and returns +// its ordinal value. It's the caller's responsibility to convert the +// result to an appropriate Code type. 
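rawVarint above (and its counterpart in the encoder later in this diff) uses zig-zag coding so that small negative numbers still encode as short uvarints: the sign moves into the least-significant bit. A tiny round-trip sketch of the transformation itself; in the real Decoder/Encoder the resulting uvarints are then read and written with encoding/binary:

package main

import "fmt"

// zigzag folds the sign into bit 0: 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
func zigzag(x int64) uint64 {
	ux := uint64(x) << 1
	if x < 0 {
		ux = ^ux
	}
	return ux
}

// unzigzag is the inverse, matching Decoder.rawVarint above.
func unzigzag(ux uint64) int64 {
	x := int64(ux >> 1)
	if ux&1 != 0 {
		x = ^x
	}
	return x
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2, -64, 63} {
		fmt.Println(v, zigzag(v), unzigzag(zigzag(v)))
	}
}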
+// +// TODO(mdempsky): Ideally this method would have signature "Code[T +// Code] T" instead, but we don't allow generic methods and the +// compiler can't depend on generics yet anyway. +func (r *Decoder) Code(mark SyncMarker) int { + r.Sync(mark) + return r.Len() +} + +// Reloc decodes a relocation of expected section k from the element +// bitstream and returns an index to the referenced element. +func (r *Decoder) Reloc(k RelocKind) Index { + r.Sync(SyncUseReloc) + return r.rawReloc(k, r.Len()) +} + +// String decodes and returns a string value from the element +// bitstream. +func (r *Decoder) String() string { + r.Sync(SyncString) + return r.common.StringIdx(r.Reloc(RelocString)) +} + +// Strings decodes and returns a variable-length slice of strings from +// the element bitstream. +func (r *Decoder) Strings() []string { + res := make([]string, r.Len()) + for i := range res { + res[i] = r.String() + } + return res +} + +// Value decodes and returns a constant.Value from the element +// bitstream. +func (r *Decoder) Value() constant.Value { + r.Sync(SyncValue) + isComplex := r.Bool() + val := r.scalar() + if isComplex { + val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) + } + return val +} + +func (r *Decoder) scalar() constant.Value { + switch tag := CodeVal(r.Code(SyncVal)); tag { + default: + panic(fmt.Errorf("unexpected scalar tag: %v", tag)) + + case ValBool: + return constant.MakeBool(r.Bool()) + case ValString: + return constant.MakeString(r.String()) + case ValInt64: + return constant.MakeInt64(r.Int64()) + case ValBigInt: + return constant.Make(r.bigInt()) + case ValBigRat: + num := r.bigInt() + denom := r.bigInt() + return constant.Make(new(big.Rat).SetFrac(num, denom)) + case ValBigFloat: + return constant.Make(r.bigFloat()) + } +} + +func (r *Decoder) bigInt() *big.Int { + v := new(big.Int).SetBytes([]byte(r.String())) + if r.Bool() { + v.Neg(v) + } + return v +} + +func (r *Decoder) bigFloat() *big.Float { + v := new(big.Float).SetPrec(512) + assert(v.UnmarshalText([]byte(r.String())) == nil) + return v +} + +// @@@ Helpers + +// TODO(mdempsky): These should probably be removed. I think they're a +// smell that the export data format is not yet quite right. + +// PeekPkgPath returns the package path for the specified package +// index. +func (pr *PkgDecoder) PeekPkgPath(idx Index) string { + r := pr.NewDecoder(RelocPkg, idx, SyncPkgDef) + path := r.String() + if path == "" { + path = pr.pkgPath + } + return path +} + +// PeekObj returns the package path, object name, and CodeObj for the +// specified object index. +func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { + r := pr.NewDecoder(RelocName, idx, SyncObject1) + r.Sync(SyncSym) + r.Sync(SyncPkg) + path := pr.PeekPkgPath(r.Reloc(RelocPkg)) + name := r.String() + assert(name != "") + + tag := CodeObj(r.Code(SyncCodeObj)) + + return path, name, tag +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/doc.go b/vendor/golang.org/x/tools/go/internal/pkgbits/doc.go new file mode 100644 index 0000000000000..c8a2796b5e4cb --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/doc.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkgbits implements low-level coding abstractions for +// Unified IR's export data format. +// +// At a low-level, a package is a collection of bitstream elements. 
+// Each element has a "kind" and a dense, non-negative index. +// Elements can be randomly accessed given their kind and index. +// +// Individual elements are sequences of variable-length values (e.g., +// integers, booleans, strings, go/constant values, cross-references +// to other elements). Package pkgbits provides APIs for encoding and +// decoding these low-level values, but the details of mapping +// higher-level Go constructs into elements is left to higher-level +// abstractions. +// +// Elements may cross-reference each other with "relocations." For +// example, an element representing a pointer type has a relocation +// referring to the element type. +// +// Go constructs may be composed as a constellation of multiple +// elements. For example, a declared function may have one element to +// describe the object (e.g., its name, type, position), and a +// separate element to describe its function body. This allows readers +// some flexibility in efficiently seeking or re-reading data (e.g., +// inlining requires re-reading the function body for each inlined +// call, without needing to re-read the object-level details). +// +// This is a copy of internal/pkgbits in the Go implementation. +package pkgbits diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go new file mode 100644 index 0000000000000..c50c838caaecd --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go @@ -0,0 +1,379 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "bytes" + "crypto/md5" + "encoding/binary" + "go/constant" + "io" + "math/big" + "runtime" +) + +// currentVersion is the current version number. +// +// - v0: initial prototype +// +// - v1: adds the flags uint32 word +const currentVersion uint32 = 1 + +// A PkgEncoder provides methods for encoding a package's Unified IR +// export data. +type PkgEncoder struct { + // elems holds the bitstream for previously encoded elements. + elems [numRelocs][]string + + // stringsIdx maps previously encoded strings to their index within + // the RelocString section, to allow deduplication. That is, + // elems[RelocString][stringsIdx[s]] == s (if present). + stringsIdx map[string]Index + + // syncFrames is the number of frames to write at each sync + // marker. A negative value means sync markers are omitted. + syncFrames int +} + +// SyncMarkers reports whether pw uses sync markers. +func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } + +// NewPkgEncoder returns an initialized PkgEncoder. +// +// syncFrames is the number of caller frames that should be serialized +// at Sync points. Serializing additional frames results in larger +// export data files, but can help diagnosing desync errors in +// higher-level Unified IR reader/writer code. If syncFrames is +// negative, then sync markers are omitted entirely. +func NewPkgEncoder(syncFrames int) PkgEncoder { + return PkgEncoder{ + stringsIdx: make(map[string]Index), + syncFrames: syncFrames, + } +} + +// DumpTo writes the package's encoded data to out0 and returns the +// package fingerprint. 
+func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { + h := md5.New() + out := io.MultiWriter(out0, h) + + writeUint32 := func(x uint32) { + assert(binary.Write(out, binary.LittleEndian, x) == nil) + } + + writeUint32(currentVersion) + + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) + + // Write elemEndsEnds. + var sum uint32 + for _, elems := range &pw.elems { + sum += uint32(len(elems)) + writeUint32(sum) + } + + // Write elemEnds. + sum = 0 + for _, elems := range &pw.elems { + for _, elem := range elems { + sum += uint32(len(elem)) + writeUint32(sum) + } + } + + // Write elemData. + for _, elems := range &pw.elems { + for _, elem := range elems { + _, err := io.WriteString(out, elem) + assert(err == nil) + } + } + + // Write fingerprint. + copy(fingerprint[:], h.Sum(nil)) + _, err := out0.Write(fingerprint[:]) + assert(err == nil) + + return +} + +// StringIdx adds a string value to the strings section, if not +// already present, and returns its index. +func (pw *PkgEncoder) StringIdx(s string) Index { + if idx, ok := pw.stringsIdx[s]; ok { + assert(pw.elems[RelocString][idx] == s) + return idx + } + + idx := Index(len(pw.elems[RelocString])) + pw.elems[RelocString] = append(pw.elems[RelocString], s) + pw.stringsIdx[s] = idx + return idx +} + +// NewEncoder returns an Encoder for a new element within the given +// section, and encodes the given SyncMarker as the start of the +// element bitstream. +func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder { + e := pw.NewEncoderRaw(k) + e.Sync(marker) + return e +} + +// NewEncoderRaw returns an Encoder for a new element within the given +// section. +// +// Most callers should use NewEncoder instead. +func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder { + idx := Index(len(pw.elems[k])) + pw.elems[k] = append(pw.elems[k], "") // placeholder + + return Encoder{ + p: pw, + k: k, + Idx: idx, + } +} + +// An Encoder provides methods for encoding an individual element's +// bitstream data. +type Encoder struct { + p *PkgEncoder + + Relocs []RelocEnt + Data bytes.Buffer // accumulated element bitstream data + + encodingRelocHeader bool + + k RelocKind + Idx Index // index within relocation section +} + +// Flush finalizes the element's bitstream and returns its Index. +func (w *Encoder) Flush() Index { + var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + + // Backup the data so we write the relocations at the front. + var tmp bytes.Buffer + io.Copy(&tmp, &w.Data) + + // TODO(mdempsky): Consider writing these out separately so they're + // easier to strip, along with function bodies, so that we can prune + // down to just the data that's relevant to go/types. + if w.encodingRelocHeader { + panic("encodingRelocHeader already true; recursive flush?") + } + w.encodingRelocHeader = true + w.Sync(SyncRelocs) + w.Len(len(w.Relocs)) + for _, rEnt := range w.Relocs { + w.Sync(SyncReloc) + w.Len(int(rEnt.Kind)) + w.Len(int(rEnt.Idx)) + } + + io.Copy(&sb, &w.Data) + io.Copy(&sb, &tmp) + w.p.elems[w.k][w.Idx] = sb.String() + + return w.Idx +} + +func (w *Encoder) checkErr(err error) { + if err != nil { + errorf("unexpected encoding error: %v", err) + } +} + +func (w *Encoder) rawUvarint(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + _, err := w.Data.Write(buf[:n]) + w.checkErr(err) +} + +func (w *Encoder) rawVarint(x int64) { + // Zig-zag encode. 
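DumpTo above derives the 8-byte package fingerprint by teeing everything it writes through an MD5 hash with io.MultiWriter, then keeping the first 8 bytes of the digest and appending them to the output. A standalone sketch of that pattern with an invented payload:

package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
)

func main() {
	var out bytes.Buffer
	h := md5.New()
	w := io.MultiWriter(&out, h) // everything written to w is hashed as a side effect

	io.WriteString(w, "element data would go here")

	var fp [8]byte
	copy(fp[:], h.Sum(nil)) // first 8 bytes of the MD5 digest
	out.Write(fp[:])        // the fingerprint trails the payload, as in DumpTo

	fmt.Printf("%d bytes written, fingerprint %x\n", out.Len(), fp)
}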
+ ux := uint64(x) << 1 + if x < 0 { + ux = ^ux + } + + w.rawUvarint(ux) +} + +func (w *Encoder) rawReloc(r RelocKind, idx Index) int { + // TODO(mdempsky): Use map for lookup; this takes quadratic time. + for i, rEnt := range w.Relocs { + if rEnt.Kind == r && rEnt.Idx == idx { + return i + } + } + + i := len(w.Relocs) + w.Relocs = append(w.Relocs, RelocEnt{r, idx}) + return i +} + +func (w *Encoder) Sync(m SyncMarker) { + if !w.p.SyncMarkers() { + return + } + + // Writing out stack frame string references requires working + // relocations, but writing out the relocations themselves involves + // sync markers. To prevent infinite recursion, we simply trim the + // stack frame for sync markers within the relocation header. + var frames []string + if !w.encodingRelocHeader && w.p.syncFrames > 0 { + pcs := make([]uintptr, w.p.syncFrames) + n := runtime.Callers(2, pcs) + frames = fmtFrames(pcs[:n]...) + } + + // TODO(mdempsky): Save space by writing out stack frames as a + // linked list so we can share common stack frames. + w.rawUvarint(uint64(m)) + w.rawUvarint(uint64(len(frames))) + for _, frame := range frames { + w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame)))) + } +} + +// Bool encodes and writes a bool value into the element bitstream, +// and then returns the bool value. +// +// For simple, 2-alternative encodings, the idiomatic way to call Bool +// is something like: +// +// if w.Bool(x != 0) { +// // alternative #1 +// } else { +// // alternative #2 +// } +// +// For multi-alternative encodings, use Code instead. +func (w *Encoder) Bool(b bool) bool { + w.Sync(SyncBool) + var x byte + if b { + x = 1 + } + err := w.Data.WriteByte(x) + w.checkErr(err) + return b +} + +// Int64 encodes and writes an int64 value into the element bitstream. +func (w *Encoder) Int64(x int64) { + w.Sync(SyncInt64) + w.rawVarint(x) +} + +// Uint64 encodes and writes a uint64 value into the element bitstream. +func (w *Encoder) Uint64(x uint64) { + w.Sync(SyncUint64) + w.rawUvarint(x) +} + +// Len encodes and writes a non-negative int value into the element bitstream. +func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } + +// Int encodes and writes an int value into the element bitstream. +func (w *Encoder) Int(x int) { w.Int64(int64(x)) } + +// Len encodes and writes a uint value into the element bitstream. +func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } + +// Reloc encodes and writes a relocation for the given (section, +// index) pair into the element bitstream. +// +// Note: Only the index is formally written into the element +// bitstream, so bitstream decoders must know from context which +// section an encoded relocation refers to. +func (w *Encoder) Reloc(r RelocKind, idx Index) { + w.Sync(SyncUseReloc) + w.Len(w.rawReloc(r, idx)) +} + +// Code encodes and writes a Code value into the element bitstream. +func (w *Encoder) Code(c Code) { + w.Sync(c.Marker()) + w.Len(c.Value()) +} + +// String encodes and writes a string value into the element +// bitstream. +// +// Internally, strings are deduplicated by adding them to the strings +// section (if not already present), and then writing a relocation +// into the element bitstream. +func (w *Encoder) String(s string) { + w.Sync(SyncString) + w.Reloc(RelocString, w.p.StringIdx(s)) +} + +// Strings encodes and writes a variable-length slice of strings into +// the element bitstream. 
+func (w *Encoder) Strings(ss []string) { + w.Len(len(ss)) + for _, s := range ss { + w.String(s) + } +} + +// Value encodes and writes a constant.Value into the element +// bitstream. +func (w *Encoder) Value(val constant.Value) { + w.Sync(SyncValue) + if w.Bool(val.Kind() == constant.Complex) { + w.scalar(constant.Real(val)) + w.scalar(constant.Imag(val)) + } else { + w.scalar(val) + } +} + +func (w *Encoder) scalar(val constant.Value) { + switch v := constant.Val(val).(type) { + default: + errorf("unhandled %v (%v)", val, val.Kind()) + case bool: + w.Code(ValBool) + w.Bool(v) + case string: + w.Code(ValString) + w.String(v) + case int64: + w.Code(ValInt64) + w.Int64(v) + case *big.Int: + w.Code(ValBigInt) + w.bigInt(v) + case *big.Rat: + w.Code(ValBigRat) + w.bigInt(v.Num()) + w.bigInt(v.Denom()) + case *big.Float: + w.Code(ValBigFloat) + w.bigFloat(v) + } +} + +func (w *Encoder) bigInt(v *big.Int) { + b := v.Bytes() + w.String(string(b)) // TODO: More efficient encoding. + w.Bool(v.Sign() < 0) +} + +func (w *Encoder) bigFloat(v *big.Float) { + b := v.Append(nil, 'p', -1) + w.String(string(b)) // TODO: More efficient encoding. +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/flags.go b/vendor/golang.org/x/tools/go/internal/pkgbits/flags.go new file mode 100644 index 0000000000000..654222745facd --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/flags.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +const ( + flagSyncMarkers = 1 << iota // file format contains sync markers +) diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go1.go new file mode 100644 index 0000000000000..5294f6a63edd7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go1.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 +// +build !go1.7 + +// TODO(mdempsky): Remove after #44505 is resolved + +package pkgbits + +import "runtime" + +func walkFrames(pcs []uintptr, visit frameVisitor) { + for _, pc := range pcs { + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + + visit(file, line, fn.Name(), pc-fn.Entry()) + } +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go17.go new file mode 100644 index 0000000000000..2324ae7adfe20 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go17.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 +// +build go1.7 + +package pkgbits + +import "runtime" + +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. 
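+//
+// A typical call site collects the PCs first, for example:
+//
+//	pcs := make([]uintptr, 8)
+//	n := runtime.Callers(2, pcs)
+//	walkFrames(pcs[:n], visit)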
+func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go b/vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go new file mode 100644 index 0000000000000..7a8f04ab3fc66 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go @@ -0,0 +1,42 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A RelocKind indicates a particular section within a unified IR export. +type RelocKind int + +// An Index represents a bitstream element index within a particular +// section. +type Index int + +// A relocEnt (relocation entry) is an entry in an element's local +// reference table. +// +// TODO(mdempsky): Rename this too. +type RelocEnt struct { + Kind RelocKind + Idx Index +} + +// Reserved indices within the meta relocation section. +const ( + PublicRootIdx Index = 0 + PrivateRootIdx Index = 1 +) + +const ( + RelocString RelocKind = iota + RelocMeta + RelocPosBase + RelocPkg + RelocName + RelocType + RelocObj + RelocObjExt + RelocObjDict + RelocBody + + numRelocs = iota +) diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/support.go b/vendor/golang.org/x/tools/go/internal/pkgbits/support.go new file mode 100644 index 0000000000000..ad26d3b28cae1 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/support.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import "fmt" + +func assert(b bool) { + if !b { + panic("assertion failed") + } +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Errorf(format, args...)) +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/go/internal/pkgbits/sync.go new file mode 100644 index 0000000000000..5bd51ef717007 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/sync.go @@ -0,0 +1,113 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "fmt" + "strings" +) + +// fmtFrames formats a backtrace for reporting reader/writer desyncs. +func fmtFrames(pcs ...uintptr) []string { + res := make([]string, 0, len(pcs)) + walkFrames(pcs, func(file string, line int, name string, offset uintptr) { + // Trim package from function name. It's just redundant noise. + name = strings.TrimPrefix(name, "cmd/compile/internal/noder.") + + res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset)) + }) + return res +} + +type frameVisitor func(file string, line int, name string, offset uintptr) + +// SyncMarker is an enum type that represents markers that may be +// written to export data to ensure the reader and writer stay +// synchronized. +type SyncMarker int + +//go:generate stringer -type=SyncMarker -trimprefix=Sync + +const ( + _ SyncMarker = iota + + // Public markers (known to go/types importers). + + // Low-level coding markers. 
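+	// (Each of these is written by the corresponding Encoder primitive:
+	// for example Bool writes SyncBool, String writes SyncString, and
+	// Flush writes SyncRelocs/SyncReloc for the relocation header.)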
+ SyncEOF + SyncBool + SyncInt64 + SyncUint64 + SyncString + SyncValue + SyncVal + SyncRelocs + SyncReloc + SyncUseReloc + + // Higher-level object and type markers. + SyncPublic + SyncPos + SyncPosBase + SyncObject + SyncObject1 + SyncPkg + SyncPkgDef + SyncMethod + SyncType + SyncTypeIdx + SyncTypeParamNames + SyncSignature + SyncParams + SyncParam + SyncCodeObj + SyncSym + SyncLocalIdent + SyncSelector + + // Private markers (only known to cmd/compile). + SyncPrivate + + SyncFuncExt + SyncVarExt + SyncTypeExt + SyncPragma + + SyncExprList + SyncExprs + SyncExpr + SyncExprType + SyncAssign + SyncOp + SyncFuncLit + SyncCompLit + + SyncDecl + SyncFuncBody + SyncOpenScope + SyncCloseScope + SyncCloseAnotherScope + SyncDeclNames + SyncDeclName + + SyncStmts + SyncBlockStmt + SyncIfStmt + SyncForStmt + SyncSwitchStmt + SyncRangeStmt + SyncCaseClause + SyncCommClause + SyncSelectStmt + SyncDecls + SyncLabeledStmt + SyncUseObjLocal + SyncAddLocal + SyncLinkname + SyncStmt1 + SyncStmtsEnd + SyncLabel + SyncOptLabel +) diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/go/internal/pkgbits/syncmarker_string.go new file mode 100644 index 0000000000000..4a5b0ca5f2ffc --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/syncmarker_string.go @@ -0,0 +1,89 @@ +// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT. + +package pkgbits + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SyncEOF-1] + _ = x[SyncBool-2] + _ = x[SyncInt64-3] + _ = x[SyncUint64-4] + _ = x[SyncString-5] + _ = x[SyncValue-6] + _ = x[SyncVal-7] + _ = x[SyncRelocs-8] + _ = x[SyncReloc-9] + _ = x[SyncUseReloc-10] + _ = x[SyncPublic-11] + _ = x[SyncPos-12] + _ = x[SyncPosBase-13] + _ = x[SyncObject-14] + _ = x[SyncObject1-15] + _ = x[SyncPkg-16] + _ = x[SyncPkgDef-17] + _ = x[SyncMethod-18] + _ = x[SyncType-19] + _ = x[SyncTypeIdx-20] + _ = x[SyncTypeParamNames-21] + _ = x[SyncSignature-22] + _ = x[SyncParams-23] + _ = x[SyncParam-24] + _ = x[SyncCodeObj-25] + _ = x[SyncSym-26] + _ = x[SyncLocalIdent-27] + _ = x[SyncSelector-28] + _ = x[SyncPrivate-29] + _ = x[SyncFuncExt-30] + _ = x[SyncVarExt-31] + _ = x[SyncTypeExt-32] + _ = x[SyncPragma-33] + _ = x[SyncExprList-34] + _ = x[SyncExprs-35] + _ = x[SyncExpr-36] + _ = x[SyncExprType-37] + _ = x[SyncAssign-38] + _ = x[SyncOp-39] + _ = x[SyncFuncLit-40] + _ = x[SyncCompLit-41] + _ = x[SyncDecl-42] + _ = x[SyncFuncBody-43] + _ = x[SyncOpenScope-44] + _ = x[SyncCloseScope-45] + _ = x[SyncCloseAnotherScope-46] + _ = x[SyncDeclNames-47] + _ = x[SyncDeclName-48] + _ = x[SyncStmts-49] + _ = x[SyncBlockStmt-50] + _ = x[SyncIfStmt-51] + _ = x[SyncForStmt-52] + _ = x[SyncSwitchStmt-53] + _ = x[SyncRangeStmt-54] + _ = x[SyncCaseClause-55] + _ = x[SyncCommClause-56] + _ = x[SyncSelectStmt-57] + _ = x[SyncDecls-58] + _ = x[SyncLabeledStmt-59] + _ = x[SyncUseObjLocal-60] + _ = x[SyncAddLocal-61] + _ = x[SyncLinkname-62] + _ = x[SyncStmt1-63] + _ = x[SyncStmtsEnd-64] + _ = x[SyncLabel-65] + _ = x[SyncOptLabel-66] +} + +const _SyncMarker_name = 
"EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" + +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} + +func (i SyncMarker) String() string { + i -= 1 + if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) { + return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]] +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 4bfe28a51ff52..da4ab89fe63f1 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -67,7 +67,6 @@ Most tools should pass their command-line arguments (after any flags) uninterpreted to the loader, so that the loader can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. - */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 0e1e7f11fee8c..de881562de1d7 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -26,7 +26,6 @@ import ( "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/xerrors" ) // debug controls verbose logging. @@ -303,11 +302,12 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries } dirResponse, err := state.createDriverResponse(pattern) - // If there was an error loading the package, or the package is returned - // with errors, try to load the file as an ad-hoc package. + // If there was an error loading the package, or no packages are returned, + // or the package is returned with errors, try to load the file as an + // ad-hoc package. // Usually the error will appear in a returned package, but may not if we're // in module mode and the ad-hoc is located outside a module. - if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && len(dirResponse.Packages[0].Errors) == 1 { var queryErr error if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { @@ -393,6 +393,8 @@ type jsonPackage struct { CompiledGoFiles []string IgnoredGoFiles []string IgnoredOtherFiles []string + EmbedPatterns []string + EmbedFiles []string CFiles []string CgoFiles []string CXXFiles []string @@ -444,7 +446,11 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse // Run "go list" for complete // information on the specified packages. 
- buf, err := state.invokeGo("list", golistargs(state.cfg, words)...) + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...) if err != nil { return nil, err } @@ -565,6 +571,8 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), + EmbedFiles: absJoin(p.Dir, p.EmbedFiles), + EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), forTest: p.ForTest, depsErrors: p.DepsErrors, @@ -805,17 +813,83 @@ func absJoin(dir string, fileses ...[]string) (res []string) { return res } -func golistargs(cfg *Config, words []string) []string { +func jsonFlag(cfg *Config, goVersion int) string { + if goVersion < 19 { + return "-json" + } + var fields []string + added := make(map[string]bool) + addFields := func(fs ...string) { + for _, f := range fs { + if !added[f] { + added[f] = true + fields = append(fields, f) + } + } + } + addFields("Name", "ImportPath", "Error") // These fields are always needed + if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", + "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", + "SwigFiles", "SwigCXXFiles", "SysoFiles") + if cfg.Tests { + addFields("TestGoFiles", "XTestGoFiles") + } + } + if cfg.Mode&NeedTypes != 0 { + // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, + // even when -compiled isn't passed in. + // TODO(#52435): Should we make the test ask for -compiled, or automatically + // request CompiledGoFiles in certain circumstances? + addFields("Dir", "CompiledGoFiles") + } + if cfg.Mode&NeedCompiledGoFiles != 0 { + addFields("Dir", "CompiledGoFiles", "Export") + } + if cfg.Mode&NeedImports != 0 { + // When imports are requested, DepOnly is used to distinguish between packages + // explicitly requested and transitive imports of those packages. + addFields("DepOnly", "Imports", "ImportMap") + if cfg.Tests { + addFields("TestImports", "XTestImports") + } + } + if cfg.Mode&NeedDeps != 0 { + addFields("DepOnly") + } + if usesExportData(cfg) { + // Request Dir in the unlikely case Export is not absolute. + addFields("Dir", "Export") + } + if cfg.Mode&needInternalForTest != 0 { + addFields("ForTest") + } + if cfg.Mode&needInternalDepsErrors != 0 { + addFields("DepsErrors") + } + if cfg.Mode&NeedModule != 0 { + addFields("Module") + } + if cfg.Mode&NeedEmbedFiles != 0 { + addFields("EmbedFiles") + } + if cfg.Mode&NeedEmbedPatterns != 0 { + addFields("EmbedPatterns") + } + return "-json=" + strings.Join(fields, ",") +} + +func golistargs(cfg *Config, words []string, goVersion int) []string { const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo fullargs := []string{ - "-e", "-json", + "-e", jsonFlag(cfg, goVersion), fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), fmt.Sprintf("-test=%t", cfg.Tests), fmt.Sprintf("-export=%t", usesExportData(cfg)), fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), // go list doesn't let you pass -test and -find together, // probably because you'd just get the TestMain. 
- fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), } fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") @@ -879,7 +953,7 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, if !ok { // Catastrophic error: // - context cancellation - return nil, xerrors.Errorf("couldn't run 'go': %w", err) + return nil, fmt.Errorf("couldn't run 'go': %w", err) } // Old go version? diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 7ea37e7eeac3c..5c080d21b54a9 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -15,7 +15,7 @@ var allModes = []LoadMode{ NeedCompiledGoFiles, NeedImports, NeedDeps, - NeedExportsFile, + NeedExportFile, NeedTypes, NeedSyntax, NeedTypesInfo, @@ -28,7 +28,7 @@ var modeStrings = []string{ "NeedCompiledGoFiles", "NeedImports", "NeedDeps", - "NeedExportsFile", + "NeedExportFile", "NeedTypes", "NeedSyntax", "NeedTypesInfo", diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 1b5424e78f7a0..a93dc6add4d8e 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -39,9 +39,6 @@ import ( // Load may return more information than requested. type LoadMode int -// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to -// NeedExportFile to make it consistent with the Package field it's adding. - const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota @@ -59,8 +56,8 @@ const ( // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. NeedDeps - // NeedExportsFile adds ExportFile. - NeedExportsFile + // NeedExportFile adds ExportFile. + NeedExportFile // NeedTypes adds Types, Fset, and IllTyped. NeedTypes @@ -74,12 +71,25 @@ const ( // NeedTypesSizes adds TypesSizes. NeedTypesSizes + // needInternalDepsErrors adds the internal deps errors field for use by gopls. + needInternalDepsErrors + + // needInternalForTest adds the internal forTest field. + // Tests must also be set on the context for this field to be populated. + needInternalForTest + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. // Modifies CompiledGoFiles and Types, and has no effect on its own. typecheckCgo // NeedModule adds Module. NeedModule + + // NeedEmbedFiles adds EmbedFiles. + NeedEmbedFiles + + // NeedEmbedPatterns adds EmbedPatterns. + NeedEmbedPatterns ) const ( @@ -102,6 +112,9 @@ const ( // Deprecated: LoadAllSyntax exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadAllSyntax = LoadSyntax | NeedDeps + + // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + NeedExportsFile = NeedExportFile ) // A Config specifies details about how packages should be loaded. @@ -296,6 +309,14 @@ type Package struct { // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. OtherFiles []string + // EmbedFiles lists the absolute file paths of the package's files + // embedded with go:embed. + EmbedFiles []string + + // EmbedPatterns lists the absolute file patterns of the package's + // files embedded with go:embed. 
+ EmbedPatterns []string + // IgnoredFiles lists source files that are not part of the package // using the current build configuration but that might be part of // the package using other build configurations. @@ -389,6 +410,8 @@ func init() { config.(*Config).modFlag = value } packagesinternal.TypecheckCgo = int(typecheckCgo) + packagesinternal.DepsErrors = int(needInternalDepsErrors) + packagesinternal.ForTest = int(needInternalForTest) } // An Error describes a problem with a package's metadata, syntax, or types. @@ -431,6 +454,8 @@ type flatPackage struct { GoFiles []string `json:",omitempty"` CompiledGoFiles []string `json:",omitempty"` OtherFiles []string `json:",omitempty"` + EmbedFiles []string `json:",omitempty"` + EmbedPatterns []string `json:",omitempty"` IgnoredFiles []string `json:",omitempty"` ExportFile string `json:",omitempty"` Imports map[string]string `json:",omitempty"` @@ -454,6 +479,8 @@ func (p *Package) MarshalJSON() ([]byte, error) { GoFiles: p.GoFiles, CompiledGoFiles: p.CompiledGoFiles, OtherFiles: p.OtherFiles, + EmbedFiles: p.EmbedFiles, + EmbedPatterns: p.EmbedPatterns, IgnoredFiles: p.IgnoredFiles, ExportFile: p.ExportFile, } @@ -481,6 +508,8 @@ func (p *Package) UnmarshalJSON(b []byte) error { GoFiles: flat.GoFiles, CompiledGoFiles: flat.CompiledGoFiles, OtherFiles: flat.OtherFiles, + EmbedFiles: flat.EmbedFiles, + EmbedPatterns: flat.EmbedPatterns, ExportFile: flat.ExportFile, } if len(flat.Imports) > 0 { @@ -614,7 +643,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || // ... or if we need types and the exportData is invalid. We fall back to (incompletely) // typechecking packages from source if they fail to compile. - (ld.Mode&NeedTypes|NeedTypesInfo != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" + (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" lpkg := &loaderPackage{ Package: pkg, needtypes: needtypes, @@ -752,13 +781,19 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { ld.pkgs[i].OtherFiles = nil ld.pkgs[i].IgnoredFiles = nil } + if ld.requestedMode&NeedEmbedFiles == 0 { + ld.pkgs[i].EmbedFiles = nil + } + if ld.requestedMode&NeedEmbedPatterns == 0 { + ld.pkgs[i].EmbedPatterns = nil + } if ld.requestedMode&NeedCompiledGoFiles == 0 { ld.pkgs[i].CompiledGoFiles = nil } if ld.requestedMode&NeedImports == 0 { ld.pkgs[i].Imports = nil } - if ld.requestedMode&NeedExportsFile == 0 { + if ld.requestedMode&NeedExportFile == 0 { ld.pkgs[i].ExportFile = "" } if ld.requestedMode&NeedTypes == 0 { @@ -1053,7 +1088,6 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { // // Because files are scanned in parallel, the token.Pos // positions of the resulting ast.Files are not ordered. -// func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { var wg sync.WaitGroup n := len(filenames) @@ -1097,7 +1131,6 @@ func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { // sameFile returns true if x and y have the same basename and denote // the same file. -// func sameFile(x, y string) bool { if x == y { // It could be the case that y doesn't exist. 
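The needsrc change above, in (*loader).refine, is an operator-precedence fix: in Go, the bitwise operators & and | bind tighter than !=, so the old expression ld.Mode&NeedTypes|NeedTypesInfo != 0 parsed as ((ld.Mode&NeedTypes)|NeedTypesInfo) != 0 and was therefore always true. The new form only triggers the source fallback when one of the bits is actually set. A minimal standalone sketch (the flag values below are made up for illustration, not the real go/packages constants):

	package main

	import "fmt"

	// Illustrative flag values only; the real constants live in go/packages.
	const (
		NeedTypes     = 1 << 0
		NeedTypesInfo = 1 << 1
	)

	func main() {
		mode := 0 // neither bit set
		// Old grouping: ((mode&NeedTypes)|NeedTypesInfo) != 0 — always true.
		fmt.Println(mode&NeedTypes|NeedTypesInfo != 0) // true
		// Fixed grouping: only true when one of the bits is set.
		fmt.Println(mode&(NeedTypes|NeedTypesInfo) != 0) // false
	}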
@@ -1210,8 +1243,13 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error if err != nil { return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) } + if _, ok := view["go.shape"]; ok { + // Account for the pseudopackage "go.shape" that gets + // created by generic code. + viewLen++ + } if viewLen != len(view) { - log.Fatalf("Unexpected package creation during export data loading") + log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath) } lpkg.Types = tpkg @@ -1222,17 +1260,8 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error // impliedLoadMode returns loadMode with its dependencies. func impliedLoadMode(loadMode LoadMode) LoadMode { - if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 { - // If NeedTypesInfo, go/packages needs to do typechecking itself so it can - // associate type info with the AST. To do so, we need the export data - // for dependencies, which means we need to ask for the direct dependencies. - // NeedImports is used to ask for the direct dependencies. - loadMode |= NeedImports - } - - if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 { - // With NeedDeps we need to load at least direct dependencies. - // NeedImports is used to ask for the direct dependencies. + if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 { + // All these things require knowing the import graph. loadMode |= NeedImports } @@ -1240,5 +1269,5 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { } func usesExportData(cfg *Config) bool { - return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 + return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index f753368346584..67256dc3974cc 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -264,8 +264,10 @@ func cmdDebugStr(cmd *exec.Cmd) string { env := make(map[string]string) for _, kv := range cmd.Env { split := strings.SplitN(kv, "=", 2) - k, v := split[0], split[1] - env[k] = v + if len(split) == 2 { + k, v := split[0], split[1] + env[k] = v + } } var args []string diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index 9702094c59edd..d9950b1f0bef9 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -23,6 +23,8 @@ var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} var TypecheckCgo int +var DepsErrors int // must be set as a LoadMode to call GetDepsErrors +var ForTest int // must be set as a LoadMode to call GetForTest var SetModFlag = func(config interface{}, value string) {} var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index ab6b30b83e45b..25a1426d30ec2 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -16,11 +16,10 @@ // Additionally, this package contains common utilities for working with the // new generic constructs, to supplement the 
standard library APIs. Notably, // the StructuralTerms API computes a minimal representation of the structural -// restrictions on a type parameter. In the future, this API may be available -// from go/types. +// restrictions on a type parameter. // -// See the example/README.md for a more detailed guide on how to update tools -// to support generics. +// An external version of these APIs is available in the +// golang.org/x/exp/typeparams module. package typeparams import ( @@ -121,15 +120,15 @@ func OriginMethod(fn *types.Func) *types.Func { // // For example, consider the following type declarations: // -// type Interface[T any] interface { -// Accept(T) -// } +// type Interface[T any] interface { +// Accept(T) +// } // -// type Container[T any] struct { -// Element T -// } +// type Container[T any] struct { +// Element T +// } // -// func (c Container[T]) Accept(t T) { c.Element = t } +// func (c Container[T]) Accept(t T) { c.Element = t } // // In this case, GenericAssignableTo reports that instantiations of Container // are assignable to the corresponding instantiation of Interface. diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go new file mode 100644 index 0000000000000..993135ec90e89 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -0,0 +1,122 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. +func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types, + } + + terms, err := _NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) -> empty type set of interface. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. + ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. 
+ return nil + } + } + return ch +} + +// _NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// _NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, _NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, _NormalTerms returns ErrEmptyTypeSet. +// +// _NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func _NormalTerms(typ types.Type) ([]*Term, error) { + switch typ := typ.(type) { + case *TypeParam: + return StructuralTerms(typ) + case *Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*Term{NewTerm(false, typ)}, nil + } +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index 090f142a5f34b..9c631b6512ded 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -24,20 +24,22 @@ var ErrEmptyTypeSet = errors.New("empty type set") // Structural type restrictions of a type parameter are created via // non-interface types embedded in its constraint interface (directly, or via a // chain of interface embeddings). For example, in the declaration -// type T[P interface{~int; m()}] int +// +// type T[P interface{~int; m()}] int +// // the structural restriction of the type parameter P is ~int. // // With interface embedding and unions, the specification of structural type // restrictions may be arbitrarily complex. 
For example, consider the // following: // -// type A interface{ ~string|~[]byte } +// type A interface{ ~string|~[]byte } // -// type B interface{ int|string } +// type B interface{ int|string } // -// type C interface { ~string|~int } +// type C interface { ~string|~int } // -// type T[P interface{ A|B; C }] int +// type T[P interface{ A|B; C }] int // // In this example, the structural type restriction of P is ~string|int: A|B // expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go index 10857d504c4fa..933106a23dd43 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -97,15 +97,6 @@ func (xl termlist) norm() termlist { return rl } -// If the type set represented by xl is specified by a single (non-𝓤) term, -// structuralType returns that type. Otherwise it returns nil. -func (xl termlist) structuralType() types.Type { - if nl := xl.norm(); len(nl) == 1 { - return nl[0].typ // if nl.isAll() then typ is nil, which is ok - } - return nil -} - // union returns the union xl ∪ yl. func (xl termlist) union(yl termlist) termlist { return append(xl, yl...).norm() diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go index 6df18669fac3e..27a5d70bd6ec5 100644 --- a/vendor/golang.org/x/xerrors/fmt.go +++ b/vendor/golang.org/x/xerrors/fmt.go @@ -34,7 +34,8 @@ const percentBangString = "%!" // operand that does not implement the error interface. The %w verb is otherwise // a synonym for %v. // -// Deprecated: As of Go 1.13, use fmt.Errorf instead. +// Note that as of Go 1.13, the fmt.Errorf function will do error formatting, +// but it will not capture a stack backtrace. func Errorf(format string, a ...interface{}) error { format = formatPlusW(format) // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter. diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json index ec9c4620df551..d73517d78ff94 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json @@ -948,7 +948,7 @@ "parameterOrder": [], "parameters": { "filter": { - "description": "Optional. An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter queries: | Query | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | Equivalent to above. | | NAME:howl | Equivalent to above. | | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. 
| | labels.color:red labels.size:big | The project's label `color` has the value `red` and its label `size` has the value `big`.| | lifecycleState:DELETE_REQUESTED | Only show projects that are pending deletion.| If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). In this case an alternate search index is used which provides more consistent results.", + "description": "Optional. An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter queries: | Query | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | Equivalent to above. | | NAME:howl | Equivalent to above. | | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. | | labels.color:red labels.size:big | The project's label `color` has the value `red` or its label `size` has the value `big`. | | lifecycleState:DELETE_REQUESTED | Only show projects that are pending deletion.| If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). In this case an alternate search index is used which provides more consistent results.", "location": "query", "type": "string" }, @@ -1171,7 +1171,7 @@ } } }, - "revision": "20220501", + "revision": "20220901", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { @@ -1242,7 +1242,7 @@ "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . 
* `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", + "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. 
If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index 24ea763e15367..012a50a87098d 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -8,35 +8,35 @@ // // For product documentation, see: https://cloud.google.com/resource-manager // -// Creating a client +// # Creating a client // // Usage example: // -// import "google.golang.org/api/cloudresourcemanager/v1" -// ... -// ctx := context.Background() -// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx) +// import "google.golang.org/api/cloudresourcemanager/v1" +// ... +// ctx := context.Background() +// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // -// Other authentication options +// # Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // -// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithScopes(cloudresourcemanager.CloudPlatformReadOnlyScope)) +// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithScopes(cloudresourcemanager.CloudPlatformReadOnlyScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // -// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithAPIKey("AIza...")) +// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package cloudresourcemanager // import "google.golang.org/api/cloudresourcemanager/v1" @@ -345,19 +345,24 @@ type Binding struct { // `allUsers`: A special identifier that represents anyone who is on the // internet; with or without a Google account. * // `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. * - // `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . * - // `serviceAccount:{emailid}`: An email address that represents a - // service account. 
For example, - // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An - // email address that represents a Google group. For example, - // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An - // email address (plus unique identifier) representing a user that has - // been recently deleted. For example, - // `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered - // user retains the role in the binding. * + // who is authenticated with a Google account or a service account. Does + // not include identities that come from external identity providers + // (IdPs) through identity federation. * `user:{emailid}`: An email + // address that represents a specific Google account. For example, + // `alice@example.com` . * `serviceAccount:{emailid}`: An email address + // that represents a Google service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * + // `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: + // An identifier for a Kubernetes service account + // (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). + // For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. + // * `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. * + // `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a user that has been recently + // deleted. For example, `alice@example.com?uid=123456789012345678901`. + // If the user is recovered, this value reverts to `user:{emailid}` and + // the recovered user retains the role in the binding. * // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address // (plus unique identifier) representing a service account that has been // recently deleted. For example, @@ -496,7 +501,8 @@ func (s *ClearOrgPolicyRequest) MarshalJSON() ([]byte, error) { } // CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation: -// Metadata describing a long running folder operation +// +// Metadata describing a long running folder operation type CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation struct { // DestinationParent: The resource name of the folder or organization we // are either creating the folder under or moving the folder to. @@ -2517,17 +2523,17 @@ func (c *FoldersClearOrgPolicyCall) Do(opts ...googleapi.CallOption) (*Empty, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -2588,8 +2594,8 @@ type FoldersGetEffectiveOrgPolicyCall struct { // computed `Policy` across multiple resources. Subtrees of Resource // Manager resource hierarchy with 'under:' prefix will not be expanded. // -// - resource: The name of the resource to start computing the effective -// `Policy`. +// - resource: The name of the resource to start computing the effective +// `Policy`. 
func (r *FoldersService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *FoldersGetEffectiveOrgPolicyCall { c := &FoldersGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2664,17 +2670,17 @@ func (c *FoldersGetEffectiveOrgPolicyCall) Do(opts ...googleapi.CallOption) (*Or if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -2811,17 +2817,17 @@ func (c *FoldersGetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -2957,17 +2963,17 @@ func (c *FoldersListAvailableOrgPolicyConstraintsCall) Do(opts ...googleapi.Call if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListAvailableOrgPolicyConstraintsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -3122,17 +3128,17 @@ func (c *FoldersListOrgPoliciesCall) Do(opts ...googleapi.CallOption) (*ListOrgP if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListOrgPoliciesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -3289,17 +3295,17 @@ func (c *FoldersSetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -3428,17 +3434,17 @@ func (c *LiensCreateCall) Do(opts ...googleapi.CallOption) (*Lien, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Lien{ ServerResponse: googleapi.ServerResponse{ @@ -3557,17 +3563,17 @@ func (c *LiensDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { if res.Body != nil 
{ res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -3707,17 +3713,17 @@ func (c *LiensGetCall) Do(opts ...googleapi.CallOption) (*Lien, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Lien{ ServerResponse: googleapi.ServerResponse{ @@ -3877,17 +3883,17 @@ func (c *LiensListCall) Do(opts ...googleapi.CallOption) (*ListLiensResponse, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListLiensResponse{ ServerResponse: googleapi.ServerResponse{ @@ -4054,17 +4060,17 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -4194,17 +4200,17 @@ func (c *OrganizationsClearOrgPolicyCall) Do(opts ...googleapi.CallOption) (*Emp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -4262,10 +4268,10 @@ type OrganizationsGetCall struct { // Get: Fetches an Organization resource identified by the specified // resource name. // -// - name: The resource name of the Organization to fetch. This is the -// organization's relative path in the API, formatted as -// "organizations/[organizationId]". For example, -// "organizations/1234". +// - name: The resource name of the Organization to fetch. This is the +// organization's relative path in the API, formatted as +// "organizations/[organizationId]". For example, +// "organizations/1234". 
func (r *OrganizationsService) Get(name string) *OrganizationsGetCall { c := &OrganizationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4347,17 +4353,17 @@ func (c *OrganizationsGetCall) Do(opts ...googleapi.CallOption) (*Organization, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Organization{ ServerResponse: googleapi.ServerResponse{ @@ -4416,8 +4422,8 @@ type OrganizationsGetEffectiveOrgPolicyCall struct { // computed `Policy` across multiple resources. Subtrees of Resource // Manager resource hierarchy with 'under:' prefix will not be expanded. // -// - resource: The name of the resource to start computing the effective -// `Policy`. +// - resource: The name of the resource to start computing the effective +// `Policy`. func (r *OrganizationsService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *OrganizationsGetEffectiveOrgPolicyCall { c := &OrganizationsGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4492,17 +4498,17 @@ func (c *OrganizationsGetEffectiveOrgPolicyCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -4565,10 +4571,10 @@ type OrganizationsGetIamPolicyCall struct { // `resourcemanager.organizations.getIamPolicy` on the specified // organization // -// - resource: REQUIRED: The resource for which the policy is being -// requested. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. 
func (r *OrganizationsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *OrganizationsGetIamPolicyCall { c := &OrganizationsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4643,17 +4649,17 @@ func (c *OrganizationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -4790,17 +4796,17 @@ func (c *OrganizationsGetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -4936,17 +4942,17 @@ func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) Do(opts ...googleap if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListAvailableOrgPolicyConstraintsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5101,17 +5107,17 @@ func (c *OrganizationsListOrgPoliciesCall) Do(opts ...googleapi.CallOption) (*Li if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListOrgPoliciesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5262,17 +5268,17 @@ func (c *OrganizationsSearchCall) Do(opts ...googleapi.CallOption) (*SearchOrgan if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SearchOrganizationsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5346,10 +5352,10 @@ type OrganizationsSetIamPolicyCall struct { // `resourcemanager.organizations.setIamPolicy` on the specified // organization // -// - resource: REQUIRED: The resource for which the policy is being -// specified. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// specified. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. 
func (r *OrganizationsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *OrganizationsSetIamPolicyCall { c := &OrganizationsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5424,17 +5430,17 @@ func (c *OrganizationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -5569,17 +5575,17 @@ func (c *OrganizationsSetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -5639,10 +5645,10 @@ type OrganizationsTestIamPermissionsCall struct { // organization's resource name, e.g. "organizations/123". There are no // permissions required for making this API call. // -// - resource: REQUIRED: The resource for which the policy detail is -// being requested. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy detail is +// being requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. 
func (r *OrganizationsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *OrganizationsTestIamPermissionsCall { c := &OrganizationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5717,17 +5723,17 @@ func (c *OrganizationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5860,17 +5866,17 @@ func (c *ProjectsClearOrgPolicyCall) Do(opts ...googleapi.CallOption) (*Empty, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -6006,17 +6012,17 @@ func (c *ProjectsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -6142,17 +6148,17 @@ func (c *ProjectsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -6289,17 +6295,17 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Project{ ServerResponse: googleapi.ServerResponse{ @@ -6431,17 +6437,17 @@ func (c *ProjectsGetAncestryCall) Do(opts ...googleapi.CallOption) (*GetAncestry if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &GetAncestryResponse{ ServerResponse: googleapi.ServerResponse{ @@ -6502,8 +6508,8 @@ type ProjectsGetEffectiveOrgPolicyCall struct { // computed `Policy` across multiple resources. 
Subtrees of Resource // Manager resource hierarchy with 'under:' prefix will not be expanded. // -// - resource: The name of the resource to start computing the effective -// `Policy`. +// - resource: The name of the resource to start computing the effective +// `Policy`. func (r *ProjectsService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *ProjectsGetEffectiveOrgPolicyCall { c := &ProjectsGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6578,17 +6584,17 @@ func (c *ProjectsGetEffectiveOrgPolicyCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -6652,10 +6658,10 @@ type ProjectsGetIamPolicyCall struct { // structure and identification, see Resource Names // (https://cloud.google.com/apis/design/resource_names). // -// - resource: REQUIRED: The resource for which the policy is being -// requested. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. func (r *ProjectsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsGetIamPolicyCall { c := &ProjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6730,17 +6736,17 @@ func (c *ProjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -6876,17 +6882,17 @@ func (c *ProjectsGetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -6974,7 +6980,7 @@ func (r *ProjectsService) List() *ProjectsListCall { // labels.color:* | The project has the label `color`. | | // labels.color:red | The project's label `color` has the value `red`. | // | labels.color:red labels.size:big | The project's label `color` has -// the value `red` and its label `size` has the value `big`.| | +// the value `red` or its label `size` has the value `big`. | | // lifecycleState:DELETE_REQUESTED | Only show projects that are pending // deletion.| If no filter is specified, the call will return projects // for which the user has the `resourcemanager.projects.get` permission. 
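The recurring change in this regenerated file wraps both the low-level HTTP failure and the CheckResponse error with gensupport.WrapError before returning. Below is a minimal sketch (not part of the diff, helper name hypothetical) of how a caller can still recover the underlying *googleapi.Error, since the wrapped error remains reachable through errors.As.

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/api/googleapi"
)

// describeAPIError is a hypothetical helper: it reports the HTTP status
// carried by errors returned from the generated Do methods.
func describeAPIError(err error) string {
	var apiErr *googleapi.Error
	if errors.As(err, &apiErr) {
		return fmt.Sprintf("API error %d: %s", apiErr.Code, apiErr.Message)
	}
	return fmt.Sprintf("non-API error: %v", err)
}

func main() {
	// Simulate the error shape a Do method returns on a 403 response.
	err := &googleapi.Error{Code: 403, Message: "caller lacks permission"}
	fmt.Println(describeAPIError(err))
}
```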
@@ -7079,17 +7085,17 @@ func (c *ProjectsListCall) Do(opts ...googleapi.CallOption) (*ListProjectsRespon if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListProjectsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -7110,7 +7116,7 @@ func (c *ProjectsListCall) Do(opts ...googleapi.CallOption) (*ListProjectsRespon // "parameterOrder": [], // "parameters": { // "filter": { - // "description": "Optional. An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter queries: | Query | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | Equivalent to above. | | NAME:howl | Equivalent to above. | | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. | | labels.color:red labels.size:big | The project's label `color` has the value `red` and its label `size` has the value `big`.| | lifecycleState:DELETE_REQUESTED | Only show projects that are pending deletion.| If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). In this case an alternate search index is used which provides more consistent results.", + // "description": "Optional. An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter queries: | Query | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | Equivalent to above. | | NAME:howl | Equivalent to above. | | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. | | labels.color:red labels.size:big | The project's label `color` has the value `red` or its label `size` has the value `big`. | | lifecycleState:DELETE_REQUESTED | Only show projects that are pending deletion.| If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. 
NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). In this case an alternate search index is used which provides more consistent results.", // "location": "query", // "type": "string" // }, @@ -7250,17 +7256,17 @@ func (c *ProjectsListAvailableOrgPolicyConstraintsCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListAvailableOrgPolicyConstraintsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -7415,17 +7421,17 @@ func (c *ProjectsListOrgPoliciesCall) Do(opts ...googleapi.CallOption) (*ListOrg if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListOrgPoliciesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -7539,10 +7545,10 @@ type ProjectsSetIamPolicyCall struct { // organization inaccessible. Authorization requires the Google IAM // permission `resourcemanager.projects.setIamPolicy` on the project // -// - resource: REQUIRED: The resource for which the policy is being -// specified. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// specified. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. func (r *ProjectsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsSetIamPolicyCall { c := &ProjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7617,17 +7623,17 @@ func (c *ProjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -7761,17 +7767,17 @@ func (c *ProjectsSetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -7832,10 +7838,10 @@ type ProjectsTestIamPermissionsCall struct { // (https://cloud.google.com/apis/design/resource_names). 
There are no // permissions required for making this API call. // -// - resource: REQUIRED: The resource for which the policy detail is -// being requested. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy detail is +// being requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. func (r *ProjectsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsTestIamPermissionsCall { c := &ProjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7910,17 +7916,17 @@ func (c *ProjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -8056,17 +8062,17 @@ func (c *ProjectsUndeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -8199,17 +8205,17 @@ func (c *ProjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Project, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Project{ ServerResponse: googleapi.ServerResponse{ diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index d791f7d12c197..36ec411f29458 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -549,6 +549,56 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "setLabels": { + "description": "Sets the labels on an Address. 
To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.addresses.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -1544,112 +1594,22 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview .", - "flatPath": "projects/{project}/global/backendServices", - "httpMethod": "POST", - "id": "compute.backendServices.insert", - "parameterOrder": [ - "project" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/global/backendServices", - "request": { - "$ref": "BackendService" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "description": "Retrieves the list of BackendService resources available to the specified project.", - "flatPath": "projects/{project}/global/backendServices", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/global/backendServices/{resource}/getIamPolicy", "httpMethod": "GET", - "id": "compute.backendServices.list", + "id": "compute.backendServices.getIamPolicy", "parameterOrder": [ - "project" + "project", + "resource" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", "location": "query", - "minimum": "0", "type": "integer" }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" - } - }, - "path": "projects/{project}/global/backendServices", - "response": { - "$ref": "BackendServiceList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "description": "Patches the specified BackendService resource with the data included in the request. For more information, see Backend services overview. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/global/backendServices/{backendService}", - "httpMethod": "PATCH", - "id": "compute.backendServices.patch", - "parameterOrder": [ - "project", - "backendService" - ], - "parameters": { - "backendService": { - "description": "Name of the BackendService resource to patch.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -1657,123 +1617,33 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/global/backendServices/{backendService}", - "request": { - "$ref": "BackendService" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setEdgeSecurityPolicy": { - "description": "Sets the edge security policy for the specified backend service.", - "flatPath": "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy", - "httpMethod": "POST", - "id": "compute.backendServices.setEdgeSecurityPolicy", - "parameterOrder": [ - "project", - "backendService" - ], - "parameters": { - "backendService": { - "description": "Name of the BackendService resource to which the edge security policy should be set. The name should conform to RFC1035.", - "location": "path", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy", - "request": { - "$ref": "SecurityPolicyReference" - }, + "path": "projects/{project}/global/backendServices/{resource}/getIamPolicy", "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setSecurityPolicy": { - "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", - "flatPath": "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy", + "insert": { + "description": "Creates a BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview .", + "flatPath": "projects/{project}/global/backendServices", "httpMethod": "POST", - "id": "compute.backendServices.setSecurityPolicy", - "parameterOrder": [ - "project", - "backendService" - ], - "parameters": { - "backendService": { - "description": "Name of the BackendService resource to which the security policy should be set. 
The name should conform to RFC1035.", - "location": "path", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy", - "request": { - "$ref": "SecurityPolicyReference" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "update": { - "description": "Updates the specified BackendService resource with the data included in the request. For more information, see Backend services overview.", - "flatPath": "projects/{project}/global/backendServices/{backendService}", - "httpMethod": "PUT", - "id": "compute.backendServices.update", + "id": "compute.backendServices.insert", "parameterOrder": [ - "project", - "backendService" + "project" ], "parameters": { - "backendService": { - "description": "Name of the BackendService resource to update.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -1787,7 +1657,7 @@ "type": "string" } }, - "path": "projects/{project}/global/backendServices/{backendService}", + "path": "projects/{project}/global/backendServices", "request": { "$ref": "BackendService" }, @@ -1798,16 +1668,12 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "diskTypes": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of disk types.", - "flatPath": "projects/{project}/aggregated/diskTypes", + }, + "list": { + "description": "Retrieves the list of BackendService resources available to the specified project.", + "flatPath": "projects/{project}/global/backendServices", "httpMethod": "GET", - "id": "compute.diskTypes.aggregatedList", + "id": "compute.backendServices.list", "parameterOrder": [ "project" ], @@ -1817,11 +1683,273 @@ "location": "query", "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/global/backendServices", + "response": { + "$ref": "BackendServiceList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patches the specified BackendService resource with the data included in the request. For more information, see Backend services overview. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/global/backendServices/{backendService}", + "httpMethod": "PATCH", + "id": "compute.backendServices.patch", + "parameterOrder": [ + "project", + "backendService" + ], + "parameters": { + "backendService": { + "description": "Name of the BackendService resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/backendServices/{backendService}", + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setEdgeSecurityPolicy": { + "description": "Sets the edge security policy for the specified backend service.", + "flatPath": "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy", + "httpMethod": "POST", + "id": "compute.backendServices.setEdgeSecurityPolicy", + "parameterOrder": [ + "project", + "backendService" + ], + "parameters": { + "backendService": { + "description": "Name of the BackendService resource to which the edge security policy should be set. The name should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy", + "request": { + "$ref": "SecurityPolicyReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "flatPath": "projects/{project}/global/backendServices/{resource}/setIamPolicy", + "httpMethod": "POST", + "id": "compute.backendServices.setIamPolicy", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/backendServices/{resource}/setIamPolicy", + "request": { + "$ref": "GlobalSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setSecurityPolicy": { + "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", + "flatPath": "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy", + "httpMethod": "POST", + "id": "compute.backendServices.setSecurityPolicy", + "parameterOrder": [ + "project", + "backendService" + ], + "parameters": { + "backendService": { + "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy", + "request": { + "$ref": "SecurityPolicyReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "description": "Updates the specified BackendService resource with the data included in the request. 
For more information, see Backend services overview.", + "flatPath": "projects/{project}/global/backendServices/{backendService}", + "httpMethod": "PUT", + "id": "compute.backendServices.update", + "parameterOrder": [ + "project", + "backendService" + ], + "parameters": { + "backendService": { + "description": "Name of the BackendService resource to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/backendServices/{backendService}", + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "diskTypes": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of disk types.", + "flatPath": "projects/{project}/aggregated/diskTypes", + "httpMethod": "GET", + "id": "compute.diskTypes.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -4307,6 +4435,43 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "setLabels": { + "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.globalAddresses.setLabels", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/addresses/{resource}/setLabels", + "request": { + "$ref": "GlobalSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -7308,7 +7473,7 @@ ] }, "listManagedInstances": { - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. 
If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported.", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listManagedInstances", @@ -8765,7 +8930,7 @@ ] }, "bulkInsert": { - "description": "Creates multiple instances. Count specifies the number of instances to create.", + "description": "Creates multiple instances. Count specifies the number of instances to create. For more information, see About bulk creation of VMs.", "flatPath": "projects/{project}/zones/{zone}/instances/bulkInsert", "httpMethod": "POST", "id": "compute.instances.bulkInsert", @@ -10436,6 +10601,11 @@ "instance" ], "parameters": { + "discardLocalSsd": { + "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "location": "query", + "type": "boolean" + }, "instance": { "description": "Name of the instance resource to stop.", "location": "path", @@ -10483,6 +10653,11 @@ "instance" ], "parameters": { + "discardLocalSsd": { + "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "location": "query", + "type": "boolean" + }, "instance": { "description": "Name of the instance resource to suspend.", "location": "path", @@ -11176,6 +11351,56 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "setLabels": { + "description": "Sets the labels on an InterconnectAttachment. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.interconnectAttachments.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -11276,7 +11501,7 @@ "interconnects": { "methods": { "delete": { - "description": "Deletes the specified interconnect.", + "description": "Deletes the specified Interconnect.", "flatPath": "projects/{project}/global/interconnects/{interconnect}", "httpMethod": "DELETE", "id": "compute.interconnects.delete", @@ -11315,7 +11540,7 @@ ] }, "get": { - "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", + "description": "Returns the specified Interconnect. Get a list of available Interconnects by making a list() request.", "flatPath": "projects/{project}/global/interconnects/{interconnect}", "httpMethod": "GET", "id": "compute.interconnects.get", @@ -11350,7 +11575,7 @@ ] }, "getDiagnostics": { - "description": "Returns the interconnectDiagnostics for the specified interconnect.", + "description": "Returns the interconnectDiagnostics for the specified Interconnect.", "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", "httpMethod": "GET", "id": "compute.interconnects.getDiagnostics", @@ -11385,7 +11610,7 @@ ] }, "insert": { - "description": "Creates a Interconnect in the specified project using the data included in the request.", + "description": "Creates an Interconnect in the specified project using the data included in the request.", "flatPath": "projects/{project}/global/interconnects", "httpMethod": "POST", "id": "compute.interconnects.insert", @@ -11419,7 +11644,7 @@ ] }, "list": { - "description": "Retrieves the list of interconnect available to the specified project.", + "description": "Retrieves the list of Interconnects available to the specified project.", "flatPath": "projects/{project}/global/interconnects", "httpMethod": "GET", "id": "compute.interconnects.list", @@ -11474,7 +11699,7 @@ ] }, "patch": { - "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates the specified Interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "flatPath": "projects/{project}/global/interconnects/{interconnect}", "httpMethod": "PATCH", "id": "compute.interconnects.patch", @@ -11514,6 +11739,43 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "setLabels": { + "description": "Sets the labels on an Interconnect. 
To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.interconnects.setLabels", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/interconnects/{resource}/setLabels", + "request": { + "$ref": "GlobalSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -12338,13 +12600,13 @@ } } }, - "networkEdgeSecurityServices": { + "networkAttachments": { "methods": { "aggregatedList": { - "description": "Retrieves the list of all NetworkEdgeSecurityService resources available to the specified project.", - "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", + "description": "Retrieves the list of all NetworkAttachment resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/networkAttachments", "httpMethod": "GET", - "id": "compute.networkEdgeSecurityServices.aggregatedList", + "id": "compute.networkAttachments.aggregatedList", "parameterOrder": [ "project" ], @@ -12378,7 +12640,7 @@ "type": "string" }, "project": { - "description": "Name of the project scoping this request.", + "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, @@ -12390,9 +12652,9 @@ "type": "boolean" } }, - "path": "projects/{project}/aggregated/networkEdgeSecurityServices", + "path": "projects/{project}/aggregated/networkAttachments", "response": { - "$ref": "NetworkEdgeSecurityServiceAggregatedList" + "$ref": "NetworkAttachmentAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -12401,18 +12663,18 @@ ] }, "delete": { - "description": "Deletes the specified service.", - "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "description": "Deletes the specified NetworkAttachment in the given scope", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", "httpMethod": "DELETE", - "id": "compute.networkEdgeSecurityServices.delete", + "id": "compute.networkAttachments.delete", "parameterOrder": [ "project", "region", - "networkEdgeSecurityService" + "networkAttachment" ], "parameters": { - "networkEdgeSecurityService": { - "description": "Name of the network edge security service to delete.", + "networkAttachment": { + "description": "Name of the NetworkAttachment resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -12426,19 +12688,19 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", 
"location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", "response": { "$ref": "Operation" }, @@ -12448,18 +12710,18 @@ ] }, "get": { - "description": "Gets a specified NetworkEdgeSecurityService.", - "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "description": "Returns the specified NetworkAttachment resource in the given scope.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", "httpMethod": "GET", - "id": "compute.networkEdgeSecurityServices.get", + "id": "compute.networkAttachments.get", "parameterOrder": [ "project", "region", - "networkEdgeSecurityService" + "networkAttachment" ], "parameters": { - "networkEdgeSecurityService": { - "description": "Name of the network edge security service to get.", + "networkAttachment": { + "description": "Name of the NetworkAttachment resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -12473,16 +12735,65 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", "response": { - "$ref": "NetworkEdgeSecurityService" + "$ref": "NetworkAttachment" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getIamPolicy": { + "description": "Gets the access control policy for a 
resource. May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.networkAttachments.getIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", + "response": { + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -12491,10 +12802,10 @@ ] }, "insert": { - "description": "Creates a new service in the specified project using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + "description": "Creates a NetworkAttachment in the specified project in the given scope using the parameters that are included in the request.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments", "httpMethod": "POST", - "id": "compute.networkEdgeSecurityServices.insert", + "id": "compute.networkAttachments.insert", "parameterOrder": [ "project", "region" @@ -12508,26 +12819,21 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", "location": "query", "type": "string" - }, - "validateOnly": { - "description": "If true, the request will not be committed.", - "location": "query", - "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + "path": "projects/{project}/regions/{region}/networkAttachments", "request": { - "$ref": "NetworkEdgeSecurityService" + "$ref": "NetworkAttachment" }, "response": { "$ref": "Operation" @@ -12537,27 +12843,37 @@ "https://www.googleapis.com/auth/compute" ] }, - "patch": { - "description": "Patches the specified policy with the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", - "httpMethod": "PATCH", - "id": "compute.networkEdgeSecurityServices.patch", + "list": { + "description": "Lists the NetworkAttachments for a project in the given scope.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments", + "httpMethod": "GET", + "id": "compute.networkAttachments.list", "parameterOrder": [ "project", - "region", - "networkEdgeSecurityService" + "region" ], "parameters": { - "networkEdgeSecurityService": { - "description": "Name of the network edge security service to update.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", "type": "string" }, - "paths": { + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", - "repeated": true, "type": "string" }, "project": { @@ -12568,45 +12884,390 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/networkAttachments", + "response": { + "$ref": "NetworkAttachmentList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + "httpMethod": "POST", + "id": "compute.networkAttachments.setIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, "type": "string" }, - "updateMask": { - "description": "Indicates fields to be updated as part of this request.", - "format": "google-fieldmask", - "location": "query", + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", "request": { - "$ref": "NetworkEdgeSecurityService" + "$ref": "RegionSetPolicyRequest" }, "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", + "httpMethod": "POST", + "id": "compute.networkAttachments.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] } } }, - "networkEndpointGroups": { + "networkEdgeSecurityServices": { "methods": { "aggregatedList": { - "description": "Retrieves the list of network endpoint groups and sorts them by zone.", - "flatPath": "projects/{project}/aggregated/networkEndpointGroups", + "description": "Retrieves the list of all NetworkEdgeSecurityService resources available to the specified project.", + "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", "httpMethod": "GET", - "id": "compute.networkEndpointGroups.aggregatedList", + "id": 
"compute.networkEdgeSecurityServices.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/networkEdgeSecurityServices", + "response": { + "$ref": "NetworkEdgeSecurityServiceAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified service.", + "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "httpMethod": "DELETE", + "id": "compute.networkEdgeSecurityServices.delete", + "parameterOrder": [ + "project", + "region", + "networkEdgeSecurityService" + ], + "parameters": { + "networkEdgeSecurityService": { + "description": "Name of the network edge security service to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Gets a specified NetworkEdgeSecurityService.", + "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "httpMethod": "GET", + "id": "compute.networkEdgeSecurityServices.get", + "parameterOrder": [ + "project", + "region", + "networkEdgeSecurityService" + ], + "parameters": { + "networkEdgeSecurityService": { + "description": "Name of the network edge security service to get.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "response": { + "$ref": "NetworkEdgeSecurityService" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a new service in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + "httpMethod": "POST", + "id": "compute.networkEdgeSecurityServices.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "validateOnly": { + "description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + "request": { + "$ref": "NetworkEdgeSecurityService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "patch": { + "description": "Patches the specified policy with the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "httpMethod": "PATCH", + "id": "compute.networkEdgeSecurityServices.patch", + "parameterOrder": [ + "project", + "region", + "networkEdgeSecurityService" + ], + "parameters": { + "networkEdgeSecurityService": { + "description": "Name of the network edge security service to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "paths": { + "location": "query", + "repeated": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "updateMask": { + "description": "Indicates fields to be updated as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "request": { + "$ref": "NetworkEdgeSecurityService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "networkEndpointGroups": { + "methods": { + "aggregatedList": { + "description": "Retrieves the list of network endpoint groups and sorts them by zone.", + "flatPath": "projects/{project}/aggregated/networkEndpointGroups", + "httpMethod": "GET", + "id": "compute.networkEndpointGroups.aggregatedList", "parameterOrder": [ "project" ], @@ -17273,6 +17934,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.regionBackendServices.getIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "description": "Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", "flatPath": "projects/{project}/regions/{region}/backendServices", @@ -17428,6 +18138,51 @@ "https://www.googleapis.com/auth/compute" ] }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", + "httpMethod": "POST", + "id": "compute.regionBackendServices.setIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", + "request": { + "$ref": "RegionSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "update": { "description": "Updates the specified regional BackendService resource with the data included in the request. 
For more information, see Backend services overview .", "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", @@ -19492,7 +20247,7 @@ ] }, "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported.", + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -21891,7 +22646,7 @@ ] }, "patch": { - "description": "Patches the specified policy with the data included in the request.", + "description": "Patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", "httpMethod": "PATCH", "id": "compute.regionSecurityPolicies.patch", @@ -22141,17 +22896,17 @@ } } }, - "regionTargetHttpProxies": { + "regionSslPolicies": { "methods": { "delete": { - "description": "Deletes the specified TargetHttpProxy resource.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", "httpMethod": "DELETE", - "id": "compute.regionTargetHttpProxies.delete", + "id": "compute.regionSslPolicies.delete", "parameterOrder": [ "project", "region", - "targetHttpProxy" + "sslPolicy" ], "parameters": { "project": { @@ -22173,15 +22928,14 @@ "location": "query", "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to delete.", + "sslPolicy": { + "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", "response": { "$ref": "Operation" }, @@ -22191,14 +22945,14 @@ ] }, "get": { - "description": "Returns the specified TargetHttpProxy resource in the specified region. 
Gets a list of available target HTTP proxies by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "description": "Lists all of the ordered rules present in a single specified policy.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", "httpMethod": "GET", - "id": "compute.regionTargetHttpProxies.get", + "id": "compute.regionSslPolicies.get", "parameterOrder": [ "project", "region", - "targetHttpProxy" + "sslPolicy" ], "parameters": { "project": { @@ -22215,17 +22969,16 @@ "required": true, "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to return.", + "sslPolicy": { + "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", "response": { - "$ref": "TargetHttpProxy" + "$ref": "SslPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22234,10 +22987,10 @@ ] }, "insert": { - "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", + "description": "Creates a new policy in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies", "httpMethod": "POST", - "id": "compute.regionTargetHttpProxies.insert", + "id": "compute.regionSslPolicies.insert", "parameterOrder": [ "project", "region" @@ -22263,9 +23016,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies", + "path": "projects/{project}/regions/{region}/sslPolicies", "request": { - "$ref": "TargetHttpProxy" + "$ref": "SslPolicy" }, "response": { "$ref": "Operation" @@ -22276,10 +23029,10 @@ ] }, "list": { - "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", + "description": "Lists all the SSL policies that have been configured for the specified project and region.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies", "httpMethod": "GET", - "id": "compute.regionTargetHttpProxies.list", + "id": "compute.regionSslPolicies.list", "parameterOrder": [ "project", "region" @@ -22328,9 +23081,9 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies", + "path": "projects/{project}/regions/{region}/sslPolicies", "response": { - "$ref": "TargetHttpProxyList" + "$ref": "SslPoliciesList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22338,15 +23091,78 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setUrlMap": { - "description": "Changes the URL map for TargetHttpProxy.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", - "httpMethod": "POST", - "id": "compute.regionTargetHttpProxies.setUrlMap", + "listAvailableFeatures": { + "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures", + 
"httpMethod": "GET", + "id": "compute.regionSslPolicies.listAvailableFeatures", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures", + "response": { + "$ref": "SslPoliciesListAvailableFeaturesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patches the specified SSL policy with the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", + "httpMethod": "PATCH", + "id": "compute.regionSslPolicies.patch", "parameterOrder": [ "project", "region", - "targetHttpProxy" + "sslPolicy" ], "parameters": { "project": { @@ -22368,17 +23184,16 @@ "location": "query", "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy to set a URL map for.", + "sslPolicy": { + "description": "Name of the SSL policy to update. 
The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", "request": { - "$ref": "UrlMapReference" + "$ref": "SslPolicy" }, "response": { "$ref": "Operation" @@ -22390,17 +23205,17 @@ } } }, - "regionTargetHttpsProxies": { + "regionTargetHttpProxies": { "methods": { "delete": { - "description": "Deletes the specified TargetHttpsProxy resource.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "description": "Deletes the specified TargetHttpProxy resource.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "httpMethod": "DELETE", - "id": "compute.regionTargetHttpsProxies.delete", + "id": "compute.regionTargetHttpProxies.delete", "parameterOrder": [ "project", "region", - "targetHttpsProxy" + "targetHttpProxy" ], "parameters": { "project": { @@ -22422,15 +23237,15 @@ "location": "query", "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to delete.", + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "response": { "$ref": "Operation" }, @@ -22440,14 +23255,14 @@ ] }, "get": { - "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "description": "Returns the specified TargetHttpProxy resource in the specified region. 
Gets a list of available target HTTP proxies by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "httpMethod": "GET", - "id": "compute.regionTargetHttpsProxies.get", + "id": "compute.regionTargetHttpProxies.get", "parameterOrder": [ "project", "region", - "targetHttpsProxy" + "targetHttpProxy" ], "parameters": { "project": { @@ -22464,17 +23279,17 @@ "required": true, "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to return.", + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "response": { - "$ref": "TargetHttpsProxy" + "$ref": "TargetHttpProxy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22483,10 +23298,10 @@ ] }, "insert": { - "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", + "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", "httpMethod": "POST", - "id": "compute.regionTargetHttpsProxies.insert", + "id": "compute.regionTargetHttpProxies.insert", "parameterOrder": [ "project", "region" @@ -22512,9 +23327,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "path": "projects/{project}/regions/{region}/targetHttpProxies", "request": { - "$ref": "TargetHttpsProxy" + "$ref": "TargetHttpProxy" }, "response": { "$ref": "Operation" @@ -22525,10 +23340,10 @@ ] }, "list": { - "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", + "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", "httpMethod": "GET", - "id": "compute.regionTargetHttpsProxies.list", + "id": "compute.regionTargetHttpProxies.list", "parameterOrder": [ "project", "region" @@ -22577,9 +23392,9 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "path": "projects/{project}/regions/{region}/targetHttpProxies", "response": { - "$ref": "TargetHttpsProxyList" + "$ref": "TargetHttpProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22587,15 +23402,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified regional TargetHttpsProxy resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", - "httpMethod": "PATCH", - "id": "compute.regionTargetHttpsProxies.patch", + "setUrlMap": { + "description": "Changes the URL map for TargetHttpProxy.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "httpMethod": "POST", + "id": "compute.regionTargetHttpProxies.setUrlMap", "parameterOrder": [ "project", "region", - "targetHttpsProxy" + "targetHttpProxy" ], "parameters": { "project": { @@ -22606,7 +23421,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -22617,17 +23432,17 @@ "location": "query", "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to patch.", + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy to set a URL map for.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", "request": { - "$ref": "TargetHttpsProxy" + "$ref": "UrlMapReference" }, "response": { "$ref": "Operation" @@ -22636,12 +23451,261 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "setSslCertificates": { - "description": "Replaces SslCertificates for TargetHttpsProxy.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", - "httpMethod": "POST", - "id": "compute.regionTargetHttpsProxies.setSslCertificates", + } + } + }, + "regionTargetHttpsProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetHttpsProxy resource.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "DELETE", + "id": "compute.regionTargetHttpsProxies.delete", + "parameterOrder": [ + "project", + "region", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "GET", + "id": "compute.regionTargetHttpsProxies.get", + "parameterOrder": [ + "project", + "region", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "response": { + "$ref": "TargetHttpsProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", + "httpMethod": "POST", + "id": "compute.regionTargetHttpsProxies.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "request": { + "$ref": "TargetHttpsProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", + "httpMethod": "GET", + "id": "compute.regionTargetHttpsProxies.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "response": { + "$ref": "TargetHttpsProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patches the specified regional TargetHttpsProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "PATCH", + "id": "compute.regionTargetHttpsProxies.patch", + "parameterOrder": [ + "project", + "region", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "request": { + "$ref": "TargetHttpsProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setSslCertificates": { + "description": "Replaces SslCertificates for TargetHttpsProxy.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "httpMethod": "POST", + "id": "compute.regionTargetHttpsProxies.setSslCertificates", "parameterOrder": [ "project", "region", @@ -22739,6 +23803,205 @@ } } }, + "regionTargetTcpProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetTcpProxy resource.", + "flatPath": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + "httpMethod": "DELETE", + "id": "compute.regionTargetTcpProxies.delete", + "parameterOrder": [ + "project", + "region", + "targetTcpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetTcpProxy resource.", + "flatPath": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + "httpMethod": "GET", + "id": "compute.regionTargetTcpProxies.get", + "parameterOrder": [ + "project", + "region", + "targetTcpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + "response": { + "$ref": "TargetTcpProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetTcpProxy resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/targetTcpProxies", + "httpMethod": "POST", + "id": "compute.regionTargetTcpProxies.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetTcpProxies", + "request": { + "$ref": "TargetTcpProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of TargetTcpProxy resources available to the specified project in a given region.", + "flatPath": "projects/{project}/regions/{region}/targetTcpProxies", + "httpMethod": "GET", + "id": "compute.regionTargetTcpProxies.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/targetTcpProxies", + "response": { + "$ref": "TargetTcpProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "regionUrlMaps": { "methods": { "delete": { @@ -25141,7 +26404,7 @@ ] }, "patch": { - "description": "Patches the specified policy with the data included in the request. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", + "description": "Patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}", "httpMethod": "PATCH", "id": "compute.securityPolicies.patch", @@ -25269,6 +26532,43 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "setLabels": { + "description": "Sets the labels on a security policy. 
To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/securityPolicies/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.securityPolicies.setLabels", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/securityPolicies/{resource}/setLabels", + "request": { + "$ref": "GlobalSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -26268,6 +27568,66 @@ }, "sslPolicies": { "methods": { + "aggregatedList": { + "description": "Retrieves the list of all SslPolicy resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/sslPolicies", + "httpMethod": "GET", + "id": "compute.sslPolicies.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/sslPolicies", + "response": { + "$ref": "SslPoliciesAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { "description": "Deletes the specified SSL policy. 
The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", "flatPath": "projects/{project}/global/sslPolicies/{sslPolicy}", @@ -29330,6 +30690,66 @@ }, "targetTcpProxies": { "methods": { + "aggregatedList": { + "description": "Retrieves the list of all TargetTcpProxy resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/targetTcpProxies", + "httpMethod": "GET", + "id": "compute.targetTcpProxies.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/targetTcpProxies", + "response": { + "$ref": "TargetTcpProxyAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { "description": "Deletes the specified TargetTcpProxy resource.", "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", @@ -29835,6 +31255,56 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "setLabels": { + "description": "Sets the labels on a TargetVpnGateway. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.targetVpnGateways.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -30882,6 +32352,56 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "setLabels": { + "description": "Sets the labels on a VpnTunnel. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.vpnTunnels.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -31172,7 +32692,7 @@ } } }, - "revision": "20220526", + "revision": "20221206", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -31287,6 +32807,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -31315,6 +32836,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -31405,6 +32927,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -31433,6 +32956,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -31505,6 +33029,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -31533,6 +33058,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -31696,6 +33222,18 @@ ], "type": "string" }, + "ipv6EndpointType": { + "description": "The endpoint type of this address, which should be VM or NETLB. 
This is used for deciding which type of endpoint this address can be used after the external IPv6 address reservation.", + "enum": [ + "NETLB", + "VM" + ], + "enumDescriptions": [ + "Reserved IPv6 address can be used on network load balancer.", + "Reserved IPv6 address can be used on VM." + ], + "type": "string" + }, "kind": { "default": "compute#address", "description": "[Output Only] Type of the resource. Always compute#address for addresses.", @@ -31737,7 +33275,7 @@ "type": "integer" }, "purpose": { - "description": "The purpose of this resource, which can be one of the following values: - GCE_ENDPOINT for addresses that are used by VM instances, alias IP ranges, load balancers, and similar resources. - DNS_RESOLVER for a DNS resolver address in a subnetwork for a Cloud DNS inbound forwarder IP addresses (regional internal IP address in a subnet of a VPC network) - VPC_PEERING for global internal IP addresses used for private services access allocated ranges. - NAT_AUTO for the regional external IP addresses used by Cloud NAT when allocating addresses using automatic NAT IP address allocation. - IPSEC_INTERCONNECT for addresses created from a private IP range that are reserved for a VLAN attachment in an *IPsec-encrypted Cloud Interconnect* configuration. These addresses are regional resources. Not currently available publicly. - `SHARED_LOADBALANCER_VIP` for an internal IP address that is assigned to multiple internal forwarding rules. - `PRIVATE_SERVICE_CONNECT` for a private network address that is used to configure Private Service Connect. Only global internal addresses can use this purpose. ", + "description": "The purpose of this resource, which can be one of the following values: - GCE_ENDPOINT for addresses that are used by VM instances, alias IP ranges, load balancers, and similar resources. - DNS_RESOLVER for a DNS resolver address in a subnetwork for a Cloud DNS inbound forwarder IP addresses (regional internal IP address in a subnet of a VPC network) - VPC_PEERING for global internal IP addresses used for private services access allocated ranges. - NAT_AUTO for the regional external IP addresses used by Cloud NAT when allocating addresses using automatic NAT IP address allocation. - IPSEC_INTERCONNECT for addresses created from a private IP range that are reserved for a VLAN attachment in an *HA VPN over Cloud Interconnect* configuration. These addresses are regional resources. - `SHARED_LOADBALANCER_VIP` for an internal IP address that is assigned to multiple internal forwarding rules. - `PRIVATE_SERVICE_CONNECT` for a private network address that is used to configure Private Service Connect. Only global internal addresses can use this purpose. ", "enum": [ "DNS_RESOLVER", "GCE_ENDPOINT", @@ -31751,7 +33289,7 @@ "enumDescriptions": [ "DNS resolver address in the subnetwork.", "VM internal/alias IP, Internal LB service IP, etc.", - "A regional internal IP address range reserved for the VLAN attachment that is used in IPsec-encrypted Cloud Interconnect. This regional internal IP address range must not overlap with any IP address range of subnet/route in the VPC network and its peering networks. After the VLAN attachment is created with the reserved IP address range, when creating a new VPN gateway, its interface IP address is allocated from the associated VLAN attachment’s IP address range.", + "A regional internal IP address range reserved for the VLAN attachment that is used in HA VPN over Cloud Interconnect. 
This regional internal IP address range must not overlap with any IP address range of subnet/route in the VPC network and its peering networks. After the VLAN attachment is created with the reserved IP address range, when creating a new VPN gateway, its interface IP address is allocated from the associated VLAN attachment’s IP address range.", "External IP automatically reserved for Cloud NAT.", "A private network IP address that can be used to configure Private Service Connect. This purpose can be specified only for GLOBAL addresses of Type INTERNAL", "A regional internal IP address range reserved for Serverless.", @@ -31845,6 +33383,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -31873,6 +33412,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -31963,6 +33503,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -31991,6 +33532,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -32063,6 +33605,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -32091,6 +33634,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -32155,6 +33699,11 @@ "description": "The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.", "format": "int32", "type": "integer" + }, + "visibleCoreCount": { + "description": "The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. 
If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.", + "format": "int32", + "type": "integer" } }, "type": "object" @@ -32260,6 +33809,20 @@ "description": "An instance-attached disk resource.", "id": "AttachedDisk", "properties": { + "architecture": { + "description": "[Output Only] The architecture of the attached disk. Valid values are ARM64 or X86_64.", + "enum": [ + "ARCHITECTURE_UNSPECIFIED", + "ARM64", + "X86_64" + ], + "enumDescriptions": [ + "Default value indicating Architecture is not set.", + "Machines with architecture ARM64", + "Machines with architecture X86_64" + ], + "type": "string" + }, "autoDelete": { "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance).", "type": "boolean" @@ -32281,6 +33844,10 @@ "format": "int64", "type": "string" }, + "forceAttach": { + "description": "[Input Only] Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error.", + "type": "boolean" + }, "guestOsFeatures": { "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", "items": { @@ -32298,7 +33865,7 @@ "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both." }, "interface": { - "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.", + "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. For most machine types, the default is SCSI. Local SSDs can use either NVME or SCSI. In certain configurations, persistent disks can use NVMe. For more information, see About persistent disks.", "enum": [ "NVME", "SCSI" @@ -32360,6 +33927,20 @@ "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This field is persisted and returned for instanceTemplate and not returned in the context of instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both.", "id": "AttachedDiskInitializeParams", "properties": { + "architecture": { + "description": "The architecture of the attached disk. Valid values are arm64 or x86_64.", + "enum": [ + "ARCHITECTURE_UNSPECIFIED", + "ARM64", + "X86_64" + ], + "enumDescriptions": [ + "Default value indicating Architecture is not set.", + "Machines with architecture ARM64", + "Machines with architecture X86_64" + ], + "type": "string" + }, "description": { "description": "An optional description. 
Provide this property when creating the disk.", "type": "string" @@ -32374,7 +33955,7 @@ "type": "string" }, "diskType": { - "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL.", + "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you specify this field when creating a VM, you can provide either the full or partial URL. For example, the following values are valid: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType If you specify this field when creating or updating an instance template or all-instances configuration, specify the type of the disk, not the URL. For example: pd-standard.", "type": "string" }, "labels": { @@ -32410,6 +33991,13 @@ "format": "int64", "type": "string" }, + "resourceManagerTags": { + "additionalProperties": { + "type": "string" + }, + "description": "Resource manager tags to be bound to the disk. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT \u0026 PATCH) when empty.", + "type": "object" + }, "resourcePolicies": { "description": "Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name.", "items": { @@ -32423,7 +34011,7 @@ }, "sourceImageEncryptionKey": { "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. Instance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys." + "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. InstanceTemplate and InstancePropertiesPatch do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys." }, "sourceSnapshot": { "description": "The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceImage or disks.source is required except for local SSD. 
To create a disk with a snapshot that you created, specify the snapshot name in the following format: global/snapshots/my-backup If the source snapshot is deleted later, this field will not be set.", @@ -32659,6 +34247,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -32687,6 +34276,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -32777,6 +34367,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -32805,6 +34396,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -32935,6 +34527,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -32963,6 +34556,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -33217,7 +34811,7 @@ "type": "string" }, "capacityScaler": { - "description": "A multiplier applied to the backend's target capacity of its balancing mode. The default value is 1, which means the group serves up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. The valid ranges are 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. You cannot configure a setting of 0 when there is only one backend attached to the backend service.", + "description": "A multiplier applied to the backend's target capacity of its balancing mode. The default value is 1, which means the group serves up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. The valid ranges are 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. 
You cannot configure a setting of 0 when there is only one backend attached to the backend service. Not available with backends that don't support using a balancingMode. This includes backends such as global internet NEGs, regional serverless NEGs, and PSC NEGs.", "format": "float", "type": "number" }, @@ -33264,7 +34858,7 @@ "type": "number" }, "maxUtilization": { - "description": "Optional parameter to define a target capacity for the UTILIZATIONbalancing mode. The valid range is [0.0, 1.0]. For usage guidelines, see Utilization balancing mode.", + "description": "Optional parameter to define a target capacity for the UTILIZATION balancing mode. The valid range is [0.0, 1.0]. For usage guidelines, see Utilization balancing mode.", "format": "float", "type": "number" } @@ -33283,6 +34877,18 @@ "$ref": "BackendBucketCdnPolicy", "description": "Cloud CDN configuration for this BackendBucket." }, + "compressionMode": { + "description": "Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.", + "enum": [ + "AUTOMATIC", + "DISABLED" + ], + "enumDescriptions": [ + "Automatically uses the best compression based on the Accept-Encoding header sent by the client.", + "Disables compression. Existing compressed responses cached by Cloud CDN will not be served to clients." + ], + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -33500,6 +35106,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -33528,6 +35135,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -33599,6 +35207,18 @@ "circuitBreakers": { "$ref": "CircuitBreakers" }, + "compressionMode": { + "description": "Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.", + "enum": [ + "AUTOMATIC", + "DISABLED" + ], + "enumDescriptions": [ + "Automatically uses the best compression based on the Accept-Encoding header sent by the client.", + "Disables compression. Existing compressed responses cached by Cloud CDN will not be served to clients." + ], + "type": "string" + }, "connectionDraining": { "$ref": "ConnectionDraining" }, @@ -33738,7 +35358,7 @@ }, "outlierDetection": { "$ref": "OutlierDetection", - "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." 
+ "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, HTTP2, or GRPC, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. " }, "port": { "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port.", @@ -33881,6 +35501,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -33909,6 +35530,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -34220,6 +35842,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -34248,6 +35871,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -34358,11 +35982,11 @@ "id": "BackendServiceLogConfig", "properties": { "enable": { - "description": "This field denotes whether to enable logging for the load balancer traffic served by this backend service.", + "description": "Denotes whether to enable logging for the load balancer traffic served by this backend service. The default value is false.", "type": "boolean" }, "sampleRate": { - "description": "This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 0.0.", + "description": "This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. 
The default value is 1.0.", "format": "float", "type": "number" } @@ -34402,6 +36026,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -34430,6 +36055,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -34734,7 +36360,7 @@ "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", + "description": "Specifies the principals requesting access for a Google Cloud resource. 
`members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, @@ -34762,7 +36388,7 @@ }, "locationPolicy": { "$ref": "LocationPolicy", - "description": "Policy for chosing target zone." + "description": "Policy for chosing target zone. For more information, see Create VMs in bulk ." }, "minCount": { "description": "The minimum number of instances to create. If no min_count is specified then count is used as the default value. If min_count instances cannot be created, then no instances will be created and instances already created will be deleted.", @@ -34938,6 +36564,13 @@ "$ref": "LicenseResourceCommitment", "description": "The license specification required as part of a license commitment." }, + "mergeSourceCommitments": { + "description": "List of source commitments to be merged into a new commitment.", + "items": { + "type": "string" + }, + "type": "array" + }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -34979,6 +36612,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "splitSourceCommitment": { + "description": "Source commitment to be splitted into a new commitment.", + "type": "string" + }, "startTimestamp": { "description": "[Output Only] Commitment start time in RFC3339 text format.", "type": "string" @@ -34987,6 +36624,7 @@ "description": "[Output Only] Status of the commitment with regards to eventual expiration (each commitment has an end date defined). One of the following values: NOT_YET_ACTIVE, ACTIVE, EXPIRED.", "enum": [ "ACTIVE", + "CANCELLED", "CREATING", "EXPIRED", "NOT_YET_ACTIVE" @@ -34995,6 +36633,7 @@ "", "", "", + "", "" ], "type": "string" @@ -35085,6 +36724,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -35113,6 +36753,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -35203,6 +36844,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -35231,6 +36873,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -35303,6 +36946,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -35331,6 +36975,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -35544,7 +37189,7 @@ "type": "array" }, "allowOriginRegexes": { - "description": "Specifies a regular expression that matches allowed origins. 
For more information about the regular expression syntax, see Syntax. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes.", + "description": "Specifies a regular expression that matches allowed origins. For more information about the regular expression syntax, see Syntax. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. Regular expressions can only be used when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED.", "items": { "type": "string" }, @@ -35659,6 +37304,20 @@ "description": "Represents a Persistent Disk resource. Google Compute Engine has two Disk resources: * [Zonal](/compute/docs/reference/rest/v1/disks) * [Regional](/compute/docs/reference/rest/v1/regionDisks) Persistent disks are required for running your VM instances. Create both boot and non-boot (data) persistent disks. For more information, read Persistent Disks. For more storage options, read Storage options. The disks resource represents a zonal persistent disk. For more information, read Zonal persistent disks. The regionDisks resource represents a regional persistent disk. For more information, read Regional resources.", "id": "Disk", "properties": { + "architecture": { + "description": "The architecture of the disk. Valid values are ARM64 or X86_64.", + "enum": [ + "ARCHITECTURE_UNSPECIFIED", + "ARM64", + "X86_64" + ], + "enumDescriptions": [ + "Default value indicating Architecture is not set.", + "Machines with architecture ARM64", + "Machines with architecture X86_64" + ], + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -35741,6 +37400,10 @@ "description": "Internal use only.", "type": "string" }, + "params": { + "$ref": "DiskParams", + "description": "Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload." + }, "physicalBlockSizeBytes": { "description": "Physical block size of the persistent disk, in bytes. If not present in a request, a default value is used. The currently supported size is 4096, other sizes may be added in the future. If an unsupported value is requested, the error message will list the supported values for the caller's project.", "format": "int64", @@ -35903,6 +37566,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -35931,6 +37595,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -36062,6 +37727,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -36090,6 +37756,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -36152,6 +37819,20 @@ }, "type": "object" }, + "DiskParams": { + "description": "Additional disk params.", + "id": "DiskParams", + "properties": { + "resourceManagerTags": { + "additionalProperties": { + "type": "string" + }, + "description": "Resource manager tags to be bound to the disk. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT \u0026 PATCH) when empty.", + "type": "object" + } + }, + "type": "object" + }, "DiskType": { "description": "Represents a Disk Type resource. Google Compute Engine has two Disk Type resources: * [Regional](/compute/docs/reference/rest/v1/regionDiskTypes) * [Zonal](/compute/docs/reference/rest/v1/diskTypes) You can choose from a variety of disk types based on your needs. For more information, read Storage options. The diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks. The regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks.", "id": "DiskType", @@ -36256,6 +37937,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -36284,6 +37966,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -36374,6 +38057,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -36402,6 +38086,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -36474,6 +38159,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -36502,6 +38188,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -36611,6 +38298,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -36639,6 +38327,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -36757,6 +38446,28 @@ }, "type": "object" }, + "ErrorInfo": { + "description": "Describes the cause of the error with structured details. Example of an error when contacting the \"pubsub.googleapis.com\" API when it is not enabled: { \"reason\": \"API_DISABLED\" \"domain\": \"googleapis.com\" \"metadata\": { \"resource\": \"projects/123\", \"service\": \"pubsub.googleapis.com\" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { \"reason\": \"STOCKOUT\" \"domain\": \"spanner.googleapis.com\", \"metadata\": { \"availableRegions\": \"us-central1,us-east2\" } }", + "id": "ErrorInfo", + "properties": { + "domain": { + "description": "The logical grouping to which the \"reason\" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: \"pubsub.googleapis.com\". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is \"googleapis.com\".", + "type": "string" + }, + "metadatas": { + "additionalProperties": { + "type": "string" + }, + "description": "Additional structured details about this error. Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than {\"instanceLimit\": \"100/request\"}, should be returned as, {\"instanceLimitPerRequest\": \"100\"}, if the client exceeds the number of instances that can be created in a single (batch) request.", + "type": "object" + }, + "reason": { + "description": "The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. 
This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE.", + "type": "string" + } + }, + "type": "object" + }, "ExchangedPeeringRoute": { "id": "ExchangedPeeringRoute", "properties": { @@ -36835,6 +38546,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -36863,6 +38575,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -37067,6 +38780,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -37095,6 +38809,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -37228,7 +38943,7 @@ "type": "array" }, "direction": { - "description": "Direction of traffic to which this firewall applies, either `INGRESS` or `EGRESS`. The default is `INGRESS`. For `INGRESS` traffic, you cannot specify the destinationRanges field, and for `EGRESS` traffic, you cannot specify the sourceRanges or sourceTags fields.", + "description": "Direction of traffic to which this firewall applies, either `INGRESS` or `EGRESS`. The default is `INGRESS`. For `EGRESS` traffic, you cannot specify the sourceTags fields.", "enum": [ "EGRESS", "INGRESS" @@ -37361,6 +39076,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -37389,6 +39105,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -37498,7 +39215,7 @@ "type": "string" }, "displayName": { - "description": "Deprecated, please use short name instead. User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. This name must be set on creation and cannot be changed. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Deprecated, please use short name instead. User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. This field is not applicable to network firewall policies. This name must be set on creation and cannot be changed. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, @@ -37518,11 +39235,11 @@ "type": "string" }, "name": { - "description": "[Output Only] Name of the resource. It is a numeric ID allocated by GCP which uniquely identifies the Firewall Policy.", + "description": "Name of the resource. For Organization Firewall Policies it's a [Output Only] numeric ID allocated by Google Cloud which uniquely identifies the Organization Firewall Policy.", "type": "string" }, "parent": { - "description": "[Output Only] The parent of the firewall policy.", + "description": "[Output Only] The parent of the firewall policy. This field is not applicable to network firewall policies.", "type": "string" }, "region": { @@ -37550,7 +39267,7 @@ "type": "string" }, "shortName": { - "description": "User-provided name of the Organization firewall plicy. The name should be unique in the organization in which the firewall policy is created. This name must be set on creation and cannot be changed. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. This field is not applicable to network firewall policies. This name must be set on creation and cannot be changed. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" } @@ -37620,6 +39337,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -37648,6 +39366,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -37877,7 +39596,7 @@ "id": "ForwardingRule", "properties": { "IPAddress": { - "description": "IP address for which this forwarding rule accepts traffic. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the referenced target or backendService. While creating a forwarding rule, specifying an IPAddress is required under the following circumstances: - When the target is set to targetGrpcProxy and validateForProxyless is set to true, the IPAddress should be set to 0.0.0.0. - When the target is a Private Service Connect Google APIs bundle, you must specify an IPAddress. Otherwise, you can optionally specify an IP address that references an existing static (reserved) IP address resource. When omitted, Google Cloud assigns an ephemeral IP address. Use one of the following formats to specify an IP address while creating a forwarding rule: * IP address number, as in `100.1.2.3` * Full resource URL, as in https://www.googleapis.com/compute/v1/projects/project_id/regions/region /addresses/address-name * Partial URL or by name, as in: - projects/project_id/regions/region/addresses/address-name - regions/region/addresses/address-name - global/addresses/address-name - address-name The forwarding rule's target or backendService, and in most cases, also the loadBalancingScheme, determine the type of IP address that you can use. For detailed information, see [IP address specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). When reading an IPAddress, the API always returns the IP address number.", + "description": "IP address for which this forwarding rule accepts traffic. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the referenced target or backendService. While creating a forwarding rule, specifying an IPAddress is required under the following circumstances: - When the target is set to targetGrpcProxy and validateForProxyless is set to true, the IPAddress should be set to 0.0.0.0. - When the target is a Private Service Connect Google APIs bundle, you must specify an IPAddress. Otherwise, you can optionally specify an IP address that references an existing static (reserved) IP address resource. When omitted, Google Cloud assigns an ephemeral IP address. Use one of the following formats to specify an IP address while creating a forwarding rule: * IP address number, as in `100.1.2.3` * IPv6 address range, as in `2600:1234::/96` * Full resource URL, as in https://www.googleapis.com/compute/v1/projects/ project_id/regions/region/addresses/address-name * Partial URL or by name, as in: - projects/project_id/regions/region/addresses/address-name - regions/region/addresses/address-name - global/addresses/address-name - address-name The forwarding rule's target or backendService, and in most cases, also the loadBalancingScheme, determine the type of IP address that you can use. For detailed information, see [IP address specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). 
When reading an IPAddress, the API always returns the IP address number.", "type": "string" }, "IPProtocol": { @@ -37903,7 +39622,7 @@ "type": "string" }, "allPorts": { - "description": "This field is used along with the backend_service field for Internal TCP/UDP Load Balancing or Network Load Balancing, or with the target field for internal and external TargetInstance. You can only use one of ports and port_range, or allPorts. The three are mutually exclusive. For TCP, UDP and SCTP traffic, packets addressed to any ports will be forwarded to the target or backendService.", + "description": "This field can only be used: - If IPProtocol is one of TCP, UDP, or SCTP. - By internal TCP/UDP load balancers, backend service-based network load balancers, and internal and external protocol forwarding. Set this field to true to allow packets addressed to any port or packets lacking destination port information (for example, UDP fragments after the first fragment) to be forwarded to the backends configured with this forwarding rule. The ports, port_range, and allPorts fields are mutually exclusive.", "type": "boolean" }, "allowGlobalAccess": { @@ -37933,7 +39652,7 @@ "type": "string" }, "ipVersion": { - "description": "The IP Version that will be used by this forwarding rule. Valid options are IPV4 or IPV6. This can only be specified for an external global forwarding rule.", + "description": "The IP Version that will be used by this forwarding rule. Valid options are IPV4 or IPV6.", "enum": [ "IPV4", "IPV6", @@ -38024,11 +39743,11 @@ "type": "boolean" }, "portRange": { - "description": "This field can be used only if: - Load balancing scheme is one of EXTERNAL, INTERNAL_SELF_MANAGED or INTERNAL_MANAGED - IPProtocol is one of TCP, UDP, or SCTP. Packets addressed to ports in the specified range will be forwarded to target or backend_service. You can only use one of ports, port_range, or allPorts. The three are mutually exclusive. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint ports. Some types of forwarding target have constraints on the acceptable ports. For more information, see [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications). @pattern: \\\\d+(?:-\\\\d+)?", + "description": "This field can only be used: - If IPProtocol is one of TCP, UDP, or SCTP. - By backend service-based network load balancers, target pool-based network load balancers, internal proxy load balancers, external proxy load balancers, Traffic Director, external protocol forwarding, and Classic VPN. Some products have restrictions on what ports can be used. See port specifications for details. Only packets addressed to ports in the specified range will be forwarded to the backends configured with this forwarding rule. The ports, port_range, and allPorts fields are mutually exclusive. For external forwarding rules, two or more forwarding rules cannot use the same [IPAddress, IPProtocol] pair, and cannot have overlapping portRanges. For internal forwarding rules within the same VPC network, two or more forwarding rules cannot use the same [IPAddress, IPProtocol] pair, and cannot have overlapping portRanges. @pattern: \\\\d+(?:-\\\\d+)?", "type": "string" }, "ports": { - "description": "The ports field is only supported when the forwarding rule references a backend_service directly. 
Only packets addressed to the [specified list of ports]((https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications)) are forwarded to backends. You can only use one of ports and port_range, or allPorts. The three are mutually exclusive. You can specify a list of up to five ports, which can be non-contiguous. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint ports. @pattern: \\\\d+(?:-\\\\d+)?", + "description": "This field can only be used: - If IPProtocol is one of TCP, UDP, or SCTP. - By internal TCP/UDP load balancers, backend service-based network load balancers, and internal protocol forwarding. You can specify a list of up to five ports by number, separated by commas. The ports can be contiguous or discontiguous. Only packets addressed to these ports will be forwarded to the backends configured with this forwarding rule. For external forwarding rules, two or more forwarding rules cannot use the same [IPAddress, IPProtocol] pair, and cannot share any values defined in ports. For internal forwarding rules within the same VPC network, two or more forwarding rules cannot use the same [IPAddress, IPProtocol] pair, and cannot share any values defined in ports. The ports, port_range, and allPorts fields are mutually exclusive. @pattern: \\\\d+(?:-\\\\d+)?", "items": { "type": "string" }, @@ -38043,6 +39762,7 @@ "enum": [ "ACCEPTED", "CLOSED", + "NEEDS_ATTENTION", "PENDING", "REJECTED", "STATUS_UNSPECIFIED" @@ -38050,6 +39770,7 @@ "enumDescriptions": [ "The connection has been accepted by the producer.", "The connection has been closed by the producer and will not serve traffic going forward.", + "The connection has been accepted by the producer, but the producer needs to take further action before the forwarding rule can serve traffic.", "The connection is pending acceptance by the producer.", "The connection has been rejected by the producer.", "" @@ -38085,6 +39806,7 @@ "type": "string" }, "target": { + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. - For load balancers, see the \"Target\" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: - vpc-sc - APIs that support VPC Service Controls. - all-apis - All supported Google APIs. - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. ", "type": "string" } }, @@ -38139,6 +39861,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -38167,6 +39890,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -38257,6 +39981,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -38285,6 +40010,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -38385,6 +40111,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -38413,6 +40140,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -38469,25 +40197,25 @@ "type": "string" }, "port": { - "description": "The port number for the health check request. Must be specified if port_name and port_specification are not set or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. The port_name should conform to RFC1035.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, gRPC health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. 
The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." ], "type": "string" } @@ -38681,29 +40409,29 @@ "id": "HTTP2HealthCheck", "properties": { "host": { - "description": "The value of the host header in the HTTP/2 health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", + "description": "The value of the host header in the HTTP/2 health check request. If left empty (default value), the host header is set to the destination IP address to which health check packets are sent. The destination IP address depends on the type of load balancer. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest", "type": "string" }, "port": { - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. The default value is 443. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. 
If not specified, HTTP2 health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." ], "type": "string" }, @@ -38724,7 +40452,7 @@ "type": "string" }, "response": { - "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", + "description": "Creates a content-based HTTP/2 health check. In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http", "type": "string" } }, @@ -38734,29 +40462,29 @@ "id": "HTTPHealthCheck", "properties": { "host": { - "description": "The value of the host header in the HTTP health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", + "description": "The value of the host header in the HTTP health check request. If left empty (default value), the host header is set to the destination IP address to which health check packets are sent. The destination IP address depends on the type of load balancer. 
For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest", "type": "string" }, "port": { - "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. The default value is 80. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." 
], "type": "string" }, @@ -38777,7 +40505,7 @@ "type": "string" }, "response": { - "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", + "description": "Creates a content-based HTTP health check. In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http", "type": "string" } }, @@ -38787,29 +40515,29 @@ "id": "HTTPSHealthCheck", "properties": { "host": { - "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", + "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the host header is set to the destination IP address to which health check packets are sent. The destination IP address depends on the type of load balancer. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest", "type": "string" }, "port": { - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. The default value is 443. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTPS health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. 
For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." ], "type": "string" }, @@ -38830,7 +40558,7 @@ "type": "string" }, "response": { - "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", + "description": "Creates a content-based HTTPS health check. In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http", "type": "string" } }, @@ -38980,6 +40708,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -39008,6 +40737,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -39095,14 +40825,14 @@ "type": "string" }, "healthChecks": { - "description": "A list of URLs to the HealthCheck resources. Must have at least one HealthCheck, and not more than 10. HealthCheck resources must have portSpecification=USE_SERVING_PORT or portSpecification=USE_FIXED_PORT. For regional HealthCheckService, the HealthCheck must be regional and in the same region. For global HealthCheckService, HealthCheck must be global. Mix of regional and global HealthChecks is not supported. Multiple regional HealthChecks must belong to the same region. Regional HealthChecks must belong to the same region as zones of NEGs.", + "description": "A list of URLs to the HealthCheck resources. Must have at least one HealthCheck, and not more than 10 for regional HealthCheckService, and not more than 1 for global HealthCheckService. 
HealthCheck resources must have portSpecification=USE_SERVING_PORT or portSpecification=USE_FIXED_PORT. For regional HealthCheckService, the HealthCheck must be regional and in the same region. For global HealthCheckService, HealthCheck must be global. Mix of regional and global HealthChecks is not supported. Multiple regional HealthChecks must belong to the same region. Regional HealthChecks must belong to the same region as zones of NetworkEndpointGroups. For global HealthCheckService using global INTERNET_IP_PORT NetworkEndpointGroups, the global HealthChecks must specify sourceRegions, and HealthChecks that specify sourceRegions can only be used with global INTERNET_IP_PORT NetworkEndpointGroups.", "items": { "type": "string" }, "type": "array" }, "healthStatusAggregationPolicy": { - "description": "Optional. Policy for how the results from multiple health checks for the same endpoint are aggregated. Defaults to NO_AGGREGATION if unspecified. - NO_AGGREGATION. An EndpointHealth message is returned for each pair in the health check service. - AND. If any health check of an endpoint reports UNHEALTHY, then UNHEALTHY is the HealthState of the endpoint. If all health checks report HEALTHY, the HealthState of the endpoint is HEALTHY. .", + "description": "Optional. Policy for how the results from multiple health checks for the same endpoint are aggregated. Defaults to NO_AGGREGATION if unspecified. - NO_AGGREGATION. An EndpointHealth message is returned for each pair in the health check service. - AND. If any health check of an endpoint reports UNHEALTHY, then UNHEALTHY is the HealthState of the endpoint. If all health checks report HEALTHY, the HealthState of the endpoint is HEALTHY. . This is only allowed with regional HealthCheckService.", "enum": [ "AND", "NO_AGGREGATION" @@ -39129,7 +40859,7 @@ "type": "string" }, "networkEndpointGroups": { - "description": "A list of URLs to the NetworkEndpointGroup resources. Must not have more than 100. For regional HealthCheckService, NEGs must be in zones in the region of the HealthCheckService.", + "description": "A list of URLs to the NetworkEndpointGroup resources. Must not have more than 100. For regional HealthCheckService, NEGs must be in zones in the region of the HealthCheckService. For global HealthCheckServices, the NetworkEndpointGroups must be global INTERNET_IP_PORT.", "items": { "type": "string" }, @@ -39204,6 +40934,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -39232,6 +40963,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -39329,6 +41061,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -39357,6 +41090,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -39429,6 +41163,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -39457,6 +41192,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -39607,6 +41343,35 @@ }, "type": "object" }, + "Help": { + "description": "Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit.", + "id": "Help", + "properties": { + "links": { + "description": "URL(s) pointing to additional information on handling the current error.", + "items": { + "$ref": "HelpLink" + }, + "type": "array" + } + }, + "type": "object" + }, + "HelpLink": { + "description": "Describes a URL link.", + "id": "HelpLink", + "properties": { + "description": { + "description": "Describes what the link offers.", + "type": "string" + }, + "url": { + "description": "The URL of the link.", + "type": "string" + } + }, + "type": "object" + }, "HostRule": { "description": "UrlMaps A host-matching rule for a URL. If matched, will use the named PathMatcher to select the BackendService.", "id": "HostRule", @@ -39741,7 +41506,7 @@ "description": "The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer, number or is empty, the match fails. For example for a range [-5, 0] - -3 will match. - 0 will not match. - 0.25 will not match. - -3someString will not match. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. rangeMatch is not supported for load balancers that have loadBalancingScheme set to EXTERNAL." }, "regexMatch": { - "description": "The value of the header must match the regular expression specified in regexMatch. For more information about regular expression syntax, see Syntax. For matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "The value of the header must match the regular expression specified in regexMatch. For more information about regular expression syntax, see Syntax. 
For matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. Regular expressions can only be used when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED.", "type": "string" }, "suffixMatch": { @@ -39879,6 +41644,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -39907,6 +41673,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -39972,7 +41739,7 @@ "type": "boolean" }, "regexMatch": { - "description": "The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For more information about regular expression syntax, see Syntax. Only one of presentMatch, exactMatch, or regexMatch must be set. regexMatch only applies when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED. ", + "description": "The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For more information about regular expression syntax, see Syntax. Only one of presentMatch, exactMatch, or regexMatch must be set. Regular expressions can only be used when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED. ", "type": "string" } }, @@ -40113,7 +41880,7 @@ }, "routeAction": { "$ref": "HttpRouteAction", - "description": "In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a route rule's routeAction." + "description": "In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within a route rule's routeAction." }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. 
However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set.", @@ -40164,7 +41931,7 @@ "type": "array" }, "regexMatch": { - "description": "For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For more information about regular expression syntax, see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For more information about regular expression syntax, see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. Regular expressions can only be used when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED.", "type": "string" } }, @@ -40279,6 +42046,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -40307,6 +42075,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -40359,6 +42128,20 @@ "description": "Represents an Image resource. You can use images to create boot disks for your VM instances. For more information, read Images.", "id": "Image", "properties": { + "architecture": { + "description": "The architecture of the image. Valid values are ARM64 or X86_64.", + "enum": [ + "ARCHITECTURE_UNSPECIFIED", + "ARM64", + "X86_64" + ], + "enumDescriptions": [ + "Default value indicating Architecture is not set.", + "Machines with architecture ARM64", + "Machines with architecture X86_64" + ], + "type": "string" + }, "archiveSizeBytes": { "description": "Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).", "format": "int64", @@ -40605,6 +42388,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -40633,6 +42417,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -40776,6 +42561,20 @@ "format": "uint64", "type": "string" }, + "keyRevocationActionType": { + "description": "KeyRevocationActionType of the instance. Supported options are \"STOP\" and \"NONE\". The default value is \"NONE\" if it is not specified.", + "enum": [ + "KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED", + "NONE", + "STOP" + ], + "enumDescriptions": [ + "Default value. This value is unused.", + "Indicates user chose no operation.", + "Indicates user chose to opt for VM shutdown on key revocation." + ], + "type": "string" + }, "kind": { "default": "compute#instance", "description": "[Output Only] Type of the resource. Always compute#instance for instances.", @@ -40871,6 +42670,10 @@ }, "type": "array" }, + "resourceStatus": { + "$ref": "ResourceStatus", + "description": "[Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field." + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -41000,6 +42803,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -41028,6 +42832,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -41076,6 +42881,46 @@ }, "type": "object" }, + "InstanceConsumptionData": { + "id": "InstanceConsumptionData", + "properties": { + "consumptionInfo": { + "$ref": "InstanceConsumptionInfo", + "description": "Resources consumed by the instance." + }, + "instance": { + "description": "Server-defined URL for the instance.", + "type": "string" + } + }, + "type": "object" + }, + "InstanceConsumptionInfo": { + "id": "InstanceConsumptionInfo", + "properties": { + "guestCpus": { + "description": "The number of virtual CPUs that are available to the instance.", + "format": "int32", + "type": "integer" + }, + "localSsdGb": { + "description": "The amount of local SSD storage available to the instance, defined in GiB.", + "format": "int32", + "type": "integer" + }, + "memoryMb": { + "description": "The amount of physical memory available to the instance, defined in MiB.", + "format": "int32", + "type": "integer" + }, + "minNodeCpus": { + "description": "The minimal guaranteed number of virtual CPUs that are reserved.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "InstanceGroup": { "description": "Represents an Instance Group resource. Instance Groups can be used to configure a target for load balancing. Instance groups can either be managed or unmanaged. 
To create managed instance groups, use the instanceGroupManager or regionInstanceGroupManager resource instead. Use zonal unmanaged instance groups if you need to apply load balancing to groups of heterogeneous instances or if you need to manage the instances yourself. You cannot create regional unmanaged instance groups. For more information, read Instance groups.", "id": "InstanceGroup", @@ -41197,6 +43042,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -41225,6 +43071,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -41315,6 +43162,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -41343,6 +43191,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -41451,6 +43300,18 @@ "description": "[Output Only] The resource type, which is always compute#instanceGroupManager for managed instance groups.", "type": "string" }, + "listManagedInstancesResults": { + "description": "Pagination behavior of the listManagedInstances API method for this managed instance group.", + "enum": [ + "PAGELESS", + "PAGINATED" + ], + "enumDescriptions": [ + "(Default) Pagination is disabled for the group's listManagedInstances API method. maxResults and pageToken query parameters are ignored and all instances are returned in a single response.", + "Pagination is enabled for the group's listManagedInstances API method. maxResults and pageToken query parameters are respected." + ], + "type": "string" + }, "name": { "annotations": { "required": [ @@ -41641,6 +43502,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -41669,6 +43531,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -41774,6 +43637,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -41802,6 +43666,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -42176,6 +44041,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -42204,6 +44070,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -42303,6 +44170,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -42331,6 +44199,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -42475,6 +44344,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -42503,6 +44373,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -42606,6 +44477,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -42634,6 +44506,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -42742,6 +44615,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -42770,6 +44644,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -42860,6 +44735,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -42888,6 +44764,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -43077,6 +44954,20 @@ }, "type": "array" }, + "keyRevocationActionType": { + "description": "KeyRevocationActionType of the instance. Supported options are \"STOP\" and \"NONE\". The default value is \"NONE\" if it is not specified.", + "enum": [ + "KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED", + "NONE", + "STOP" + ], + "enumDescriptions": [ + "Default value. This value is unused.", + "Indicates user chose no operation.", + "Indicates user chose to opt for VM shutdown on key revocation." 
+ ], + "type": "string" + }, "labels": { "additionalProperties": { "type": "string" @@ -43269,6 +45160,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -43297,6 +45189,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -43501,6 +45394,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -43529,6 +45423,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -43774,7 +45669,7 @@ "type": "string" }, "nocContactEmail": { - "description": "Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Stackdriver logs alerting and Cloud Notifications.", + "description": "Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Cloud Monitoring logs alerting and Cloud Notifications. This field is required for users who sign up for Cloud Interconnect using workforce identity federation.", "type": "string" }, "operationalStatus": { @@ -43932,13 +45827,13 @@ "type": "string" }, "encryption": { - "description": "Indicates the user-supplied encryption option of this VLAN attachment (interconnectAttachment). Can only be specified at attachment creation for PARTNER or DEDICATED attachments. Possible values are: - NONE - This is the default value, which means that the VLAN attachment carries unencrypted traffic. VMs are able to send traffic to, or receive traffic from, such a VLAN attachment. - IPSEC - The VLAN attachment carries only encrypted traffic that is encrypted by an IPsec device, such as an HA VPN gateway or third-party IPsec VPN. VMs cannot directly send traffic to, or receive traffic from, such a VLAN attachment. To use *IPsec-encrypted Cloud Interconnect*, the VLAN attachment must be created with this option. Not currently available publicly. ", + "description": "Indicates the user-supplied encryption option of this VLAN attachment (interconnectAttachment). Can only be specified at attachment creation for PARTNER or DEDICATED attachments. 
Possible values are: - NONE - This is the default value, which means that the VLAN attachment carries unencrypted traffic. VMs are able to send traffic to, or receive traffic from, such a VLAN attachment. - IPSEC - The VLAN attachment carries only encrypted traffic that is encrypted by an IPsec device, such as an HA VPN gateway or third-party IPsec VPN. VMs cannot directly send traffic to, or receive traffic from, such a VLAN attachment. To use *HA VPN over Cloud Interconnect*, the VLAN attachment must be created with this option. ", "enum": [ "IPSEC", "NONE" ], "enumDescriptions": [ - "The interconnect attachment will carry only encrypted traffic that is encrypted by an IPsec device such as HA VPN gateway; VMs cannot directly send traffic to or receive traffic from such an interconnect attachment. To use IPsec-encrypted Cloud Interconnect, the interconnect attachment must be created with this option.", + "The interconnect attachment will carry only encrypted traffic that is encrypted by an IPsec device such as HA VPN gateway; VMs cannot directly send traffic to or receive traffic from such an interconnect attachment. To use HA VPN over Cloud Interconnect, the interconnect attachment must be created with this option.", "This is the default value, which means the Interconnect Attachment will carry unencrypted traffic. VMs will be able to send traffic to or receive traffic from such interconnect attachment." ], "type": "string" @@ -44128,6 +46023,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -44156,6 +46052,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -44246,6 +46143,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -44274,6 +46172,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -44377,6 +46276,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -44405,6 +46305,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -44483,6 +46384,30 @@ }, "type": "array" }, + "bundleAggregationType": { + "description": "The aggregation type of the bundle interface.", + "enum": [ + "BUNDLE_AGGREGATION_TYPE_LACP", + "BUNDLE_AGGREGATION_TYPE_STATIC" + ], + "enumDescriptions": [ + "LACP is enabled.", + "LACP is disabled." + ], + "type": "string" + }, + "bundleOperationalStatus": { + "description": "The operational status of the bundle interface.", + "enum": [ + "BUNDLE_OPERATIONAL_STATUS_DOWN", + "BUNDLE_OPERATIONAL_STATUS_UP" + ], + "enumDescriptions": [ + "If bundleAggregationType is LACP: LACP is not established and/or all links in the bundle have DOWN operational status. If bundleAggregationType is STATIC: one or more links in the bundle has DOWN operational status.", + "If bundleAggregationType is LACP: LACP is established and at least one link in the bundle has UP operational status. If bundleAggregationType is STATIC: all links in the bundle (typically just one) have UP operational status." + ], + "type": "string" + }, "links": { "description": "A list of InterconnectDiagnostics.LinkStatus objects, describing the status for each link on the Interconnect.", "items": { @@ -44588,6 +46513,18 @@ "lacpStatus": { "$ref": "InterconnectDiagnosticsLinkLACPStatus" }, + "operationalStatus": { + "description": "The operational status of the link.", + "enum": [ + "LINK_OPERATIONAL_STATUS_DOWN", + "LINK_OPERATIONAL_STATUS_UP" + ], + "enumDescriptions": [ + "The interface is unable to communicate with the remote end.", + "The interface has low level communication with the remote end." + ], + "type": "string" + }, "receivingOpticalPower": { "$ref": "InterconnectDiagnosticsLinkOpticalPower", "description": "An InterconnectDiagnostics.LinkOpticalPower object, describing the current value and status of the received light level." @@ -44641,6 +46578,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -44669,6 +46607,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -44867,6 +46806,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -44895,6 +46835,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -45272,6 +47213,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -45300,6 +47242,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -45368,6 +47311,21 @@ }, "type": "object" }, + "LocalizedMessage": { + "description": "Provides a localized error message that is safe to return to the user which can be attached to an RPC error.", + "id": "LocalizedMessage", + "properties": { + "locale": { + "description": "The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: \"en-US\", \"fr-CH\", \"es-MX\"", + "type": "string" + }, + "message": { + "description": "The localized error message in the above locale.", + "type": "string" + } + }, + "type": "object" + }, "LocationPolicy": { "description": "Configuration for location policy among multiple possible locations (e.g. preferences for zone selection among zones in a single region).", "id": "LocationPolicy", @@ -45399,8 +47357,12 @@ "LocationPolicyLocation": { "id": "LocationPolicyLocation", "properties": { + "constraints": { + "$ref": "LocationPolicyLocationConstraints", + "description": "Constraints that the caller requires on the result distribution in this zone." + }, "preference": { - "description": "Preference for a given location.", + "description": "Preference for a given location. Set to either ALLOW or DENY.", "enum": [ "ALLOW", "DENY", @@ -45416,6 +47378,18 @@ }, "type": "object" }, + "LocationPolicyLocationConstraints": { + "description": "Per-zone constraints on location policy for this zone.", + "id": "LocationPolicyLocationConstraints", + "properties": { + "maxCount": { + "description": "Maximum number of items that are allowed to be placed in this zone. The value must be non-negative.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "LogConfig": { "description": "This is deprecated and has no effect. Do not use.", "id": "LogConfig", @@ -45665,6 +47639,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -45693,6 +47668,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -45893,6 +47869,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -45921,6 +47898,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -46011,6 +47989,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -46039,6 +48018,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -46111,6 +48091,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -46139,6 +48120,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -46330,6 +48312,27 @@ "description": "[Output Only] The error type identifier for this error.", "type": "string" }, + "errorDetails": { + "description": "[Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED.", + "items": { + "properties": { + "errorInfo": { + "$ref": "ErrorInfo" + }, + "help": { + "$ref": "Help" + }, + "localizedMessage": { + "$ref": "LocalizedMessage" + }, + "quotaInfo": { + "$ref": "QuotaExceededInfo" + } + }, + "type": "object" + }, + "type": "array" + }, "location": { "description": "[Output Only] Indicates the field in the request that caused the error. 
This property is optional.", "type": "string" @@ -46495,6 +48498,10 @@ "description": "Enable ULA internal ipv6 on this network. Enabling this feature will assign a /48 from google defined ULA prefix fd20::/20. .", "type": "boolean" }, + "firewallPolicy": { + "description": "[Output Only] URL of the firewall policy the network is associated with.", + "type": "string" + }, "gatewayIPv4": { "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", @@ -46515,7 +48522,7 @@ "type": "string" }, "mtu": { - "description": "Maximum Transmission Unit in bytes. The minimum value for this field is 1460 and the maximum value is 1500 bytes. If unspecified, defaults to 1460.", + "description": "Maximum Transmission Unit in bytes. The minimum value for this field is 1300 and the maximum value is 8896. The suggested value is 1500, which is the default MTU used on the Internet, or 8896 if you want to use Jumbo frames. If unspecified, the value defaults to 1460.", "format": "int32", "type": "integer" }, @@ -46570,10 +48577,30 @@ }, "type": "object" }, - "NetworkEdgeSecurityService": { - "description": "Represents a Google Cloud Armor network edge security service resource.", - "id": "NetworkEdgeSecurityService", + "NetworkAttachment": { + "description": "NetworkAttachments A network attachment resource ...", + "id": "NetworkAttachment", "properties": { + "connectionEndpoints": { + "description": "[Output Only] An array of connections for all the producers connected to this network attachment.", + "items": { + "$ref": "NetworkAttachmentConnectedEndpoint" + }, + "type": "array" + }, + "connectionPreference": { + "enum": [ + "ACCEPT_AUTOMATIC", + "ACCEPT_MANUAL", + "INVALID" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -46583,31 +48610,50 @@ "type": "string" }, "fingerprint": { - "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a NetworkEdgeSecurityService. An up-to-date fingerprint must be provided in order to update the NetworkEdgeSecurityService, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a NetworkEdgeSecurityService.", + "description": "[Output Only] Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", "format": "byte", "type": "string" }, "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "description": "[Output Only] The unique identifier for the resource type. The server generates this identifier.", "format": "uint64", "type": "string" }, "kind": { - "default": "compute#networkEdgeSecurityService", - "description": "[Output only] Type of the resource. Always compute#networkEdgeSecurityService for NetworkEdgeSecurityServices", + "default": "compute#networkAttachment", + "description": "[Output Only] Type of the resource.", "type": "string" }, "name": { + "annotations": { + "required": [ + "compute.networkAttachments.insert" + ] + }, "description": "Name of the resource. Provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "region": { - "description": "[Output Only] URL of the region where the resource resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "network": { + "description": "[Output Only] The URL of the network which the Network Attachment belongs to.", "type": "string" }, - "securityPolicy": { - "description": "The resource URL for the network edge security service associated with this network edge security service.", + "producerAcceptLists": { + "description": "Projects that are allowed to connect to this network attachment. The project can be specified using its id or number.", + "items": { + "type": "string" + }, + "type": "array" + }, + "producerRejectLists": { + "description": "Projects that are not allowed to connect to this network attachment. The project can be specified using its id or number.", + "items": { + "type": "string" + }, + "type": "array" + }, + "region": { + "description": "[Output Only] URL of the region where the network attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" }, "selfLink": { @@ -46615,33 +48661,37 @@ "type": "string" }, "selfLinkWithId": { - "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "description": "[Output Only] Server-defined URL for this resource's resource id.", "type": "string" + }, + "subnetworks": { + "description": "An array of URLs where each entry is the URL of a subnet provided by the service consumer to use for endpoints in the producers that connect to this network attachment.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "NetworkEdgeSecurityServiceAggregatedList": { - "id": "NetworkEdgeSecurityServiceAggregatedList", + "NetworkAttachmentAggregatedList": { + "description": "Contains a list of NetworkAttachmentsScopedList.", + "id": "NetworkAttachmentAggregatedList", "properties": { - "etag": { - "type": "string" - }, "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { "additionalProperties": { - "$ref": "NetworkEdgeSecurityServicesScopedList", - "description": "Name of the scope containing this set of security policies." + "$ref": "NetworkAttachmentsScopedList", + "description": "Name of the scope containing this set of NetworkAttachments." }, - "description": "A list of NetworkEdgeSecurityServicesScopedList resources.", + "description": "A list of NetworkAttachmentsScopedList resources.", "type": "object" }, "kind": { - "default": "compute#networkEdgeSecurityServiceAggregatedList", - "description": "[Output Only] Type of resource. 
Always compute#networkEdgeSecurityServiceAggregatedList for lists of Network Edge Security Services.", + "default": "compute#networkAttachmentAggregatedList", "type": "string" }, "nextPageToken": { @@ -46652,13 +48702,6 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "unreachables": { - "description": "[Output Only] Unreachable resources.", - "items": { - "type": "string" - }, - "type": "array" - }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -46673,6 +48716,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -46701,6 +48745,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -46749,18 +48794,80 @@ }, "type": "object" }, - "NetworkEdgeSecurityServicesScopedList": { - "id": "NetworkEdgeSecurityServicesScopedList", + "NetworkAttachmentConnectedEndpoint": { + "description": "[Output Only] A connection connected to this network attachment.", + "id": "NetworkAttachmentConnectedEndpoint", "properties": { - "networkEdgeSecurityServices": { - "description": "A list of NetworkEdgeSecurityServices contained in this scope.", + "ipAddress": { + "description": "The IP address assigned to the producer instance network interface. 
This value will be a range in case of Serverless.", + "type": "string" + }, + "projectIdOrNum": { + "description": "The project id or number of the interface to which the IP was assigned.", + "type": "string" + }, + "secondaryIpCidrRanges": { + "description": "Alias IP ranges from the same subnetwork", "items": { - "$ref": "NetworkEdgeSecurityService" + "type": "string" }, "type": "array" }, + "status": { + "description": "The status of a connected endpoint to this network attachment.", + "enum": [ + "ACCEPTED", + "CLOSED", + "NEEDS_ATTENTION", + "PENDING", + "REJECTED", + "STATUS_UNSPECIFIED" + ], + "enumDescriptions": [ + "The consumer allows traffic from the producer to reach its VPC.", + "The consumer network attachment no longer exists.", + "The consumer needs to take further action before traffic can be served.", + "The consumer neither allows nor prohibits traffic from the producer to reach its VPC.", + "The consumer prohibits traffic from the producer to reach its VPC.", + "" + ], + "type": "string" + }, + "subnetwork": { + "description": "The subnetwork used to assign the IP to the producer instance network interface.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkAttachmentList": { + "id": "NetworkAttachmentList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of NetworkAttachment resources.", + "items": { + "$ref": "NetworkAttachment" + }, + "type": "array" + }, + "kind": { + "default": "compute#networkAttachmentList", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "Informational warning which replaces the list of security policies when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -46773,6 +48880,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -46801,6 +48909,392 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "NetworkAttachmentsScopedList": { + "id": "NetworkAttachmentsScopedList", + "properties": { + "networkAttachments": { + "description": "A list of NetworkAttachments contained in this scope.", + "items": { + "$ref": "NetworkAttachment" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of network attachments when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "NetworkEdgeSecurityService": { + "description": "Represents a Google Cloud Armor network edge security service resource.", + "id": "NetworkEdgeSecurityService", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a NetworkEdgeSecurityService. An up-to-date fingerprint must be provided in order to update the NetworkEdgeSecurityService, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a NetworkEdgeSecurityService.", + "format": "byte", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#networkEdgeSecurityService", + "description": "[Output only] Type of the resource. Always compute#networkEdgeSecurityService for NetworkEdgeSecurityServices", + "type": "string" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "region": { + "description": "[Output Only] URL of the region where the resource resides. You must specify this field as part of the HTTP request URL. 
It is not settable as a field in the request body.", + "type": "string" + }, + "securityPolicy": { + "description": "The resource URL for the network edge security service associated with this network edge security service.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEdgeSecurityServiceAggregatedList": { + "id": "NetworkEdgeSecurityServiceAggregatedList", + "properties": { + "etag": { + "type": "string" + }, + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "NetworkEdgeSecurityServicesScopedList", + "description": "Name of the scope containing this set of security policies." + }, + "description": "A list of NetworkEdgeSecurityServicesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#networkEdgeSecurityServiceAggregatedList", + "description": "[Output Only] Type of resource. Always compute#networkEdgeSecurityServiceAggregatedList for lists of Network Edge Security Services.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "NetworkEdgeSecurityServicesScopedList": { + "id": "NetworkEdgeSecurityServicesScopedList", + "properties": { + "networkEdgeSecurityServices": { + "description": "A list of NetworkEdgeSecurityServices contained in this scope.", + "items": { + "$ref": "NetworkEdgeSecurityService" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of security policies when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -46956,6 +49450,9 @@ ], "type": "string" }, + "pscData": { + "$ref": "NetworkEndpointGroupPscData" + }, "pscTargetService": { "description": "The target service url used to set up private service connection to a Google API or a PSC Producer Service Attachment. 
An example value is: \"asia-northeast3-cloudkms.googleapis.com\"", "type": "string" @@ -47033,6 +49530,179 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "NetworkEndpointGroupAppEngine": { + "description": "Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present. Note: App Engine service must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupAppEngine", + "properties": { + "service": { + "description": "Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: \"default\", \"my-service\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs \"foo1-dot-appname.appspot.com/v1\" and \"foo1-dot-appname.appspot.com/v2\" can be backed by the same Serverless NEG with URL mask \"\u003cservice\u003e-dot-appname.appspot.com/\u003cversion\u003e\". The URL mask will parse them to { service = \"foo1\", version = \"v1\" } and { service = \"foo1\", version = \"v2\" } respectively.", + "type": "string" + }, + "version": { + "description": "Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: \"v1\", \"v2\".", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupCloudFunction": { + "description": "Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupCloudFunction", + "properties": { + "function": { + "description": "A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: \"func1\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. For example, request URLs \" mydomain.com/function1\" and \"mydomain.com/function2\" can be backed by the same Serverless NEG with URL mask \"/\u003cfunction\u003e\". The URL mask will parse them to { function = \"function1\" } and { function = \"function2\" } respectively.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupCloudRun": { + "description": "Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. 
Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupCloudRun", + "properties": { + "service": { + "description": "Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: \"run-service\".", + "type": "string" + }, + "tag": { + "description": "Optional Cloud Run tag represents the \"named-revision\" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: \"revision-0010\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse \u003cservice\u003e and \u003ctag\u003e fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs \"foo1.domain.com/bar1\" and \"foo1.domain.com/bar2\" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask \"\u003ctag\u003e.domain.com/\u003cservice\u003e\". The URL mask will parse them to { service=\"bar1\", tag=\"foo1\" } and { service=\"bar2\", tag=\"foo2\" } respectively.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupList": { + "id": "NetworkEndpointGroupList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of NetworkEndpointGroup resources.", + "items": { + "$ref": "NetworkEndpointGroup" + }, + "type": "array" + }, + "kind": { + "default": "compute#networkEndpointGroupList", + "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupList for network endpoint group lists.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -47061,6 +49731,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -47109,176 +49780,42 @@ }, "type": "object" }, - "NetworkEndpointGroupAppEngine": { - "description": "Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present. Note: App Engine service must be in the same project and located in the same region as the Serverless NEG.", - "id": "NetworkEndpointGroupAppEngine", + "NetworkEndpointGroupPscData": { + "description": "All data that is specifically relevant to only network endpoint groups of type PRIVATE_SERVICE_CONNECT.", + "id": "NetworkEndpointGroupPscData", "properties": { - "service": { - "description": "Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: \"default\", \"my-service\".", - "type": "string" - }, - "urlMask": { - "description": "A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs \"foo1-dot-appname.appspot.com/v1\" and \"foo1-dot-appname.appspot.com/v2\" can be backed by the same Serverless NEG with URL mask \"-dot-appname.appspot.com/\". The URL mask will parse them to { service = \"foo1\", version = \"v1\" } and { service = \"foo1\", version = \"v2\" } respectively.", - "type": "string" - }, - "version": { - "description": "Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: \"v1\", \"v2\".", - "type": "string" - } - }, - "type": "object" - }, - "NetworkEndpointGroupCloudFunction": { - "description": "Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG.", - "id": "NetworkEndpointGroupCloudFunction", - "properties": { - "function": { - "description": "A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: \"func1\".", - "type": "string" - }, - "urlMask": { - "description": "A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. For example, request URLs \" mydomain.com/function1\" and \"mydomain.com/function2\" can be backed by the same Serverless NEG with URL mask \"/\". The URL mask will parse them to { function = \"function1\" } and { function = \"function2\" } respectively.", - "type": "string" - } - }, - "type": "object" - }, - "NetworkEndpointGroupCloudRun": { - "description": "Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. 
Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG.", - "id": "NetworkEndpointGroupCloudRun", - "properties": { - "service": { - "description": "Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: \"run-service\".", + "consumerPscAddress": { + "description": "[Output Only] Address allocated from given subnetwork for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as an endpoint in L7 PSC-XLB.", "type": "string" }, - "tag": { - "description": "Optional Cloud Run tag represents the \"named-revision\" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: \"revision-0010\".", + "pscConnectionId": { + "description": "[Output Only] The PSC connection id of the PSC Network Endpoint Group Consumer.", + "format": "uint64", "type": "string" }, - "urlMask": { - "description": "A template to parse \u003cservice\u003e and \u003ctag\u003e fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs \"foo1.domain.com/bar1\" and \"foo1.domain.com/bar2\" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask \"\u003ctag\u003e.domain.com/\u003cservice\u003e\". The URL mask will parse them to { service=\"bar1\", tag=\"foo1\" } and { service=\"bar2\", tag=\"foo2\" } respectively.", + "pscConnectionStatus": { + "description": "[Output Only] The connection status of the PSC Forwarding Rule.", + "enum": [ + "ACCEPTED", + "CLOSED", + "NEEDS_ATTENTION", + "PENDING", + "REJECTED", + "STATUS_UNSPECIFIED" + ], + "enumDescriptions": [ + "The connection has been accepted by the producer.", + "The connection has been closed by the producer and will not serve traffic going forward.", + "The connection has been accepted by the producer, but the producer needs to take further action before the forwarding rule can serve traffic.", + "The connection is pending acceptance by the producer.", + "The connection has been rejected by the producer.", + "" + ], "type": "string" } }, "type": "object" }, - "NetworkEndpointGroupList": { - "id": "NetworkEndpointGroupList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of NetworkEndpointGroup resources.", - "items": { - "$ref": "NetworkEndpointGroup" - }, - "type": "array" - }, - "kind": { - "default": "compute#networkEndpointGroupList", - "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupList for network endpoint group lists.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "LARGE_DEPLOYMENT_WARNING", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "PARTIAL_SUCCESS", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "Warning about failed cleanup of transient changes made by a failed operation.", - "A link to a deprecated resource was created.", - "When deploying and at least one of the resources has a type marked as deprecated", - "The user created a boot disk that is larger than image size.", - "When deploying and at least one of the resources has a type marked as experimental", - "Warning that is present in an external api call", - "Warning that value of a field has been overridden. Deprecated unused field.", - "The operation involved use of an injected kernel, which is deprecated.", - "When deploying a deployment with a exceedingly large number of resources", - "A resource depends on a missing type", - "The route's nextHopIp address is not assigned to an instance on the network.", - "The route's next hop instance cannot ip forward.", - "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", - "The route's nextHopInstance URL refers to an instance that does not exist.", - "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", - "The route's next hop instance does not have a status of RUNNING.", - "Error which is not critical. We decided to continue the process despite the mentioned error.", - "No results are present on a particular list page.", - "Success is reported, but some results may be missing due to errors", - "The user attempted to use a resource that requires a TOS they have not accepted.", - "Warning that a resource is in use.", - "One or more of the resources set to auto-delete could not be deleted because they were in use.", - "When a resource schema validation is ignored.", - "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", - "When undeclared properties in the schema are present", - "A given scope cannot be reached." - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, "NetworkEndpointGroupsAttachEndpointsRequest": { "id": "NetworkEndpointGroupsAttachEndpointsRequest", "properties": { @@ -47360,6 +49897,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -47388,6 +49926,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -47460,6 +49999,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -47488,6 +50028,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -47701,6 +50242,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -47729,6 +50271,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -48055,6 +50598,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "shareSettings": { + "$ref": "ShareSettings", + "description": "Share-settings for the node group" + }, "size": { "description": "[Output Only] The total number of nodes in the node group.", "format": "int32", @@ -48131,6 +50678,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -48159,6 +50707,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -48281,6 +50830,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -48309,6 +50859,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -48382,6 +50933,10 @@ }, "type": "array" }, + "consumedResources": { + "$ref": "InstanceConsumptionInfo", + "description": "Node resources that are reserved by all instances." + }, "cpuOvercommitType": { "description": "CPU overcommit.", "enum": [ @@ -48403,6 +50958,13 @@ }, "type": "array" }, + "instanceConsumptionData": { + "description": "Instance data that shows consumed resources on the node.", + "items": { + "$ref": "InstanceConsumptionData" + }, + "type": "array" + }, "instances": { "description": "Instances scheduled on this node.", "items": { @@ -48446,6 +51008,10 @@ "" ], "type": "string" + }, + "totalResources": { + "$ref": "InstanceConsumptionInfo", + "description": "Total amount of available resources on the node." } }, "type": "object" @@ -48515,6 +51081,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -48543,6 +51110,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -48615,6 +51183,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -48643,6 +51212,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -48852,6 +51422,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -48880,6 +51451,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -48970,6 +51542,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -48998,6 +51571,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -49085,6 +51659,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -49113,6 +51688,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -49271,6 +51847,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -49299,6 +51876,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -49389,6 +51967,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -49417,6 +51996,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -49489,6 +52069,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -49517,6 +52098,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -49625,7 +52207,7 @@ }, "resendInterval": { "$ref": "Duration", - "description": "Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, must be in the range between 600 seconds to 3600 seconds. Nanos are disallowed." + "description": "Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, must be in the range between 600 seconds to 3600 seconds. Nanos are disallowed. Can only be set for regional notification endpoints." }, "retryDurationSec": { "description": "How much time (in seconds) is spent attempting notification retries until a successful response is received. Default is 30s. 
Limit is 20m (1200s). Must be a positive number.", @@ -49676,6 +52258,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -49704,6 +52287,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -49783,6 +52367,27 @@ "description": "[Output Only] The error type identifier for this error.", "type": "string" }, + "errorDetails": { + "description": "[Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED.", + "items": { + "properties": { + "errorInfo": { + "$ref": "ErrorInfo" + }, + "help": { + "$ref": "Help" + }, + "localizedMessage": { + "$ref": "LocalizedMessage" + }, + "quotaInfo": { + "$ref": "QuotaExceededInfo" + } + }, + "type": "object" + }, + "type": "array" + }, "location": { "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional.", "type": "string" @@ -49897,6 +52502,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -49925,6 +52531,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -50028,6 +52635,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -50056,6 +52664,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -50146,6 +52755,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -50174,6 +52784,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -50246,6 +52857,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -50274,6 +52886,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -50331,22 +52944,22 @@ "description": "The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s." }, "consecutiveErrors": { - "description": "Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5.", + "description": "Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, "consecutiveGatewayFailure": { - "description": "The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3.", + "description": "The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3. 
Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, "enforcingConsecutiveErrors": { - "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0.", + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, "enforcingConsecutiveGatewayFailure": { - "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -50572,6 +53185,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -50600,6 +53214,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -50738,6 +53353,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -50766,6 +53382,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -50907,6 +53524,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -50935,6 +53553,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -50989,7 +53608,7 @@ "properties": { "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a path matcher's defaultRouteAction." + "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within a path matcher's defaultRouteAction." }, "defaultService": { "description": "The full or partial URL to the BackendService resource. This URL is used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: - https://www.googleapis.com/compute/v1/projects/project /global/backendServices/backendService - compute/v1/projects/project/global/backendServices/backendService - global/backendServices/backendService If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified. Only one of defaultService, defaultUrlRedirect , or defaultRouteAction.weightedBackendService must be set. Authorization requires one or more of the following Google IAM permissions on the specified resource default_service: - compute.backendBuckets.use - compute.backendServices.use ", @@ -51041,7 +53660,7 @@ }, "routeAction": { "$ref": "HttpRouteAction", - "description": "In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for external HTTP(S) load balancers support only the urlRewrite action within a path rule's routeAction." 
+ "description": "In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within a path rule's routeAction." }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set.", @@ -51271,6 +53890,22 @@ "$ref": "UsageExportLocation", "description": "The naming prefix for daily usage reports and the Google Cloud Storage bucket where they are stored." }, + "vmDnsSetting": { + "description": "[Output Only] Default internal DNS setting used by VMs running in this project.", + "enum": [ + "GLOBAL_DEFAULT", + "UNSPECIFIED_VM_DNS_SETTING", + "ZONAL_DEFAULT", + "ZONAL_ONLY" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, "xpnProjectStatus": { "description": "[Output Only] The role this project has in a shared VPC configuration. Currently, only projects with the host role, which is specified by the value HOST, are differentiated.", "enum": [ @@ -51486,6 +54121,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -51514,6 +54150,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -51721,6 +54358,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -51749,6 +54387,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -51838,6 +54477,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -51866,6 +54506,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -51981,6 +54622,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -52009,6 +54651,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -52110,8 +54753,12 @@ "EXTERNAL_VPN_GATEWAYS", "FIREWALLS", "FORWARDING_RULES", + "GLOBAL_EXTERNAL_MANAGED_BACKEND_SERVICES", "GLOBAL_EXTERNAL_MANAGED_FORWARDING_RULES", + "GLOBAL_EXTERNAL_PROXY_LB_BACKEND_SERVICES", "GLOBAL_INTERNAL_ADDRESSES", + "GLOBAL_INTERNAL_MANAGED_BACKEND_SERVICES", + "GLOBAL_INTERNAL_TRAFFIC_DIRECTOR_BACKEND_SERVICES", "GPUS_ALL_REGIONS", "HEALTH_CHECKS", "IMAGES", @@ -52171,7 +54818,11 @@ "PUBLIC_ADVERTISED_PREFIXES", "PUBLIC_DELEGATED_PREFIXES", "REGIONAL_AUTOSCALERS", + "REGIONAL_EXTERNAL_MANAGED_BACKEND_SERVICES", + "REGIONAL_EXTERNAL_NETWORK_LB_BACKEND_SERVICES", "REGIONAL_INSTANCE_GROUP_MANAGERS", + "REGIONAL_INTERNAL_LB_BACKEND_SERVICES", + "REGIONAL_INTERNAL_MANAGED_BACKEND_SERVICES", "RESERVATIONS", "RESOURCE_POLICIES", "ROUTERS", @@ -52187,6 +54838,7 @@ "SSL_CERTIFICATES", "STATIC_ADDRESSES", "STATIC_BYOIP_ADDRESSES", + "STATIC_EXTERNAL_IPV6_ADDRESS_RANGES", "SUBNETWORKS", "T2A_CPUS", "T2D_CPUS", @@ -52316,6 +54968,14 @@ "", "", "", + "", + "", + "", + "", + "", + "", + "", + "", "The total number of snapshots allowed for a single project.", "", "", @@ -52334,6 +54994,7 @@ "", "", "", + "", "" ], "type": "string" @@ -52350,6 +55011,33 @@ }, "type": "object" }, + "QuotaExceededInfo": { + "description": "Additional details for quota exceeded error for resource quota.", + "id": "QuotaExceededInfo", + "properties": { + "dimensions": { + "additionalProperties": { + "type": "string" + }, + "description": "The map holding related quota dimensions.", + "type": "object" + }, + "limit": { + "description": "Current effective quota limit. 
The limit's unit depends on the quota type or metric.", + "format": "double", + "type": "number" + }, + "limitName": { + "description": "The name of the quota limit.", + "type": "string" + }, + "metricName": { + "description": "The Compute Engine quota metric name.", + "type": "string" + } + }, + "type": "object" + }, "Reference": { "description": "Represents a reference to a resource.", "id": "Reference", @@ -52483,6 +55171,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -52511,6 +55200,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -52600,6 +55290,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -52628,6 +55319,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -52755,6 +55447,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -52783,6 +55476,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -52887,6 +55581,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -52915,6 +55610,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -53130,6 +55826,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -53158,6 +55855,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -53305,6 +56003,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -53333,6 +56032,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -53464,6 +56164,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -53492,6 +56193,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -53641,7 +56343,7 @@ "id": "RegionTargetHttpsProxiesSetSslCertificatesRequest", "properties": { "sslCertificates": { - "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", + "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource.", "items": { "type": "string" }, @@ -53665,7 +56367,7 @@ "id": "RequestMirrorPolicy", "properties": { "backendService": { - "description": "The full or partial URL to the BackendService resource being mirrored to.", + "description": "The full or partial URL to the BackendService resource being mirrored to. 
The backend service configured for a mirroring policy must reference backends that are of the same type as the original backend service matched in the URL map. Serverless NEG backends are not currently supported as a mirrored backend service. ", "type": "string" } }, @@ -53717,7 +56419,7 @@ }, "shareSettings": { "$ref": "ShareSettings", - "description": "Share-settings for shared-reservation" + "description": "Specify share-settings to create a shared reservation. This property is optional. For more information about the syntax and options for this field and its subfields, see the guide for creating a shared reservation." }, "specificReservation": { "$ref": "AllocationSpecificSKUReservation", @@ -53836,6 +56538,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -53864,6 +56567,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -53953,6 +56657,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -53981,6 +56686,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -54064,6 +56770,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -54092,6 +56799,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -54154,7 +56862,7 @@ "type": "string" }, "type": { - "description": "Type of resource for which this commitment applies. Possible values are VCPU and MEMORY", + "description": "Type of resource for which this commitment applies. 
Possible values are VCPU, MEMORY, LOCAL_SSD, and ACCELERATOR.", "enum": [ "ACCELERATOR", "LOCAL_SSD", @@ -54208,6 +56916,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -54236,6 +56945,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -54412,6 +57122,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -54440,6 +57151,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -54639,6 +57351,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -54667,6 +57380,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -54882,6 +57596,17 @@ }, "type": "object" }, + "ResourceStatus": { + "description": "Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls.", + "id": "ResourceStatus", + "properties": { + "physicalHost": { + "description": "[Output Only] An opaque ID of the host on which the VM is running.", + "type": "string" + } + }, + "type": "object" + }, "Route": { "description": "Represents a Route resource. A route defines a path from VM instances in the VPC network to a specific destination. This destination can be inside or outside the VPC network. For more information, read the Routes overview.", "id": "Route", @@ -55040,6 +57765,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -55068,6 +57794,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -55190,6 +57917,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -55218,6 +57946,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -55290,7 +58019,7 @@ "type": "string" }, "encryptedInterconnectRouter": { - "description": "Indicates if a router is dedicated for use with encrypted VLAN attachments (interconnectAttachments). Not currently available publicly. ", + "description": "Indicates if a router is dedicated for use with encrypted VLAN attachments (interconnectAttachments).", "type": "boolean" }, "id": { @@ -55310,6 +58039,13 @@ "description": "[Output Only] Type of resource. Always compute#router for routers.", "type": "string" }, + "md5AuthenticationKeys": { + "description": "Keys used for MD5 authentication.", + "items": { + "$ref": "RouterMd5AuthenticationKey" + }, + "type": "array" + }, "name": { "annotations": { "required": [ @@ -55413,6 +58149,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -55441,6 +58178,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -55621,6 +58359,10 @@ ], "type": "string" }, + "md5AuthenticationKeyName": { + "description": "Present if MD5 authentication is enabled for the peering. Must be the name of one of the entries in the Router.md5_authentication_keys. The field must comply with RFC1035.", + "type": "string" + }, "name": { "annotations": { "required": [ @@ -55786,6 +58528,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -55814,6 +58557,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -55862,6 +58606,31 @@ }, "type": "object" }, + "RouterMd5AuthenticationKey": { + "id": "RouterMd5AuthenticationKey", + "properties": { + "key": { + "annotations": { + "required": [ + "compute.routers.insert" + ] + }, + "description": "[Input only] Value of the key. For patch and update calls, it can be skipped to copy the value from the previous configuration. This is allowed if the key with the same name existed before the operation. Maximum length is 80 characters. Can only contain printable ASCII characters.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.routers.insert", + "compute.routers.update" + ] + }, + "description": "Name used to identify the key. Must be unique within a router. Must be referenced by at least one bgpPeer. Must comply with RFC1035.", + "type": "string" + } + }, + "type": "object" + }, "RouterNat": { "description": "Represents a Nat resource. It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided.", "id": "RouterNat", @@ -56141,14 +58910,26 @@ "bfdStatus": { "$ref": "BfdStatus" }, + "enableIpv6": { + "description": "Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default.", + "type": "boolean" + }, "ipAddress": { "description": "IP address of the local BGP interface.", "type": "string" }, + "ipv6NexthopAddress": { + "description": "IPv6 address of the local BGP interface.", + "type": "string" + }, "linkedVpnTunnel": { "description": "URL of the VPN tunnel that this BGP peer controls.", "type": "string" }, + "md5AuthEnabled": { + "description": "Informs whether MD5 authentication is enabled on this BGP peer.", + "type": "boolean" + }, "name": { "description": "Name of this BGP peer. Unique within the Routers resource.", "type": "string" @@ -56162,6 +58943,10 @@ "description": "IP address of the remote BGP interface.", "type": "string" }, + "peerIpv6NexthopAddress": { + "description": "IPv6 address of the remote BGP interface.", + "type": "string" + }, "routerApplianceInstance": { "description": "[Output only] URI of the VM instance that is used as third-party router appliances such as Next Gen Firewalls, Virtual Routers, or Router Appliances. The VM instance is the peer side of the BGP session.", "type": "string" @@ -56184,6 +58969,18 @@ ], "type": "string" }, + "statusReason": { + "description": "Indicates why particular status was returned.", + "enum": [ + "MD5_AUTH_INTERNAL_PROBLEM", + "STATUS_REASON_UNSPECIFIED" + ], + "enumDescriptions": [ + "Indicates internal problems with configuration of MD5 authentication. This particular reason can only be returned when md5AuthEnabled is true and status is DOWN.", + "" + ], + "type": "string" + }, "uptime": { "description": "Time this session has been up. 
Format: 14 years, 51 weeks, 6 days, 23 hours, 59 minutes, 59 seconds", "type": "string" @@ -56342,6 +59139,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -56370,6 +59168,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -56488,25 +59287,25 @@ "id": "SSLHealthCheck", "properties": { "port": { - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. The default value is 443. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, SSL health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. 
For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." ], "type": "string" }, @@ -56523,11 +59322,11 @@ "type": "string" }, "request": { - "description": "The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.", + "description": "Instructs the health check prober to send this exact ASCII string, up to 1024 bytes in length, after establishing the TCP connection and SSL handshake.", "type": "string" }, "response": { - "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.", + "description": "Creates a content-based SSL health check. In addition to establishing a TCP connection and the TLS handshake, you can configure the health check to pass only when the backend sends this exact response ASCII string, up to 1024 bytes in length. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-ssl-tcp", "type": "string" } }, @@ -56650,6 +59449,20 @@ "description": "An instance-attached disk resource.", "id": "SavedDisk", "properties": { + "architecture": { + "description": "[Output Only] The architecture of the attached disk.", + "enum": [ + "ARCHITECTURE_UNSPECIFIED", + "ARM64", + "X86_64" + ], + "enumDescriptions": [ + "Default value indicating Architecture is not set.", + "Machines with architecture ARM64", + "Machines with architecture X86_64" + ], + "type": "string" + }, "kind": { "default": "compute#savedDisk", "description": "[Output Only] Type of the resource. Always compute#savedDisk for attached disks.", @@ -56710,7 +59523,7 @@ "type": "object" }, "Scheduling": { - "description": "Sets the scheduling options for an Instance. NextID: 21", + "description": "Sets the scheduling options for an Instance.", "id": "Scheduling", "properties": { "automaticRestart": { @@ -56878,6 +59691,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -56906,6 +59720,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -56987,6 +59802,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -57015,6 +59831,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -57121,7 +59938,7 @@ "type": "string" }, "rules": { - "description": "A list of rules that belong to this policy. There must always be a default rule (rule with priority 2147483647 and match \"*\"). If no rules are provided when creating a security policy, a default rule with action \"allow\" will be added.", + "description": "A list of rules that belong to this policy. There must always be a default rule which is a rule with priority 2147483647 and match all condition (for the match condition this means match \"*\" for srcIpRanges and for the networkMatch condition every field must be either match \"*\" or not set). If no rules are provided when creating a security policy, a default rule with action \"allow\" will be added.", "items": { "$ref": "SecurityPolicyRule" }, @@ -57185,6 +60002,10 @@ "SecurityPolicyAdvancedOptionsConfig": { "id": "SecurityPolicyAdvancedOptionsConfig", "properties": { + "jsonCustomConfig": { + "$ref": "SecurityPolicyAdvancedOptionsConfigJsonCustomConfig", + "description": "Custom configuration to apply the JSON parsing. Only applicable when json_parsing is set to STANDARD." + }, "jsonParsing": { "enum": [ "DISABLED", @@ -57210,6 +60031,19 @@ }, "type": "object" }, + "SecurityPolicyAdvancedOptionsConfigJsonCustomConfig": { + "id": "SecurityPolicyAdvancedOptionsConfigJsonCustomConfig", + "properties": { + "contentTypes": { + "description": "A list of custom Content-Type header values to apply the JSON parsing. As per RFC 1341, a Content-Type header value has the following format: Content-Type := type \"/\" subtype *[\";\" parameter] When configuring a custom Content-Type header value, only the type/subtype needs to be specified, and the parameters should be excluded.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "SecurityPolicyDdosProtectionConfig": { "id": "SecurityPolicyDdosProtectionConfig", "properties": { @@ -57264,6 +60098,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -57292,6 +60127,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -57486,15 +60322,21 @@ "type": "string" }, "enforceOnKey": { - "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if this field 'enforce_on_key' is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforce_on_key_name\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforce_on_key_name\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. 
", "enum": [ "ALL", "HTTP_COOKIE", "HTTP_HEADER", + "HTTP_PATH", "IP", + "REGION_CODE", + "SNI", "XFF_IP" ], "enumDescriptions": [ + "", + "", + "", "", "", "", @@ -57808,6 +60650,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -57836,6 +60679,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -57902,6 +60746,7 @@ "enum": [ "ACCEPTED", "CLOSED", + "NEEDS_ATTENTION", "PENDING", "REJECTED", "STATUS_UNSPECIFIED" @@ -57909,6 +60754,7 @@ "enumDescriptions": [ "The connection has been accepted by the producer.", "The connection has been closed by the producer.", + "The connection has been accepted by the producer, but the producer needs to take further action before the forwarding rule can serve traffic.", "The connection is pending acceptance by the producer.", "The consumer is still connected but not using the connection.", "" @@ -57926,6 +60772,10 @@ "format": "uint32", "type": "integer" }, + "networkUrl": { + "description": "The network URL for the network to set the limit for.", + "type": "string" + }, "projectIdOrNum": { "description": "The project id or number for the project to set the limit for.", "type": "string" @@ -57974,6 +60824,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -58002,6 +60853,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -58074,6 +60926,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -58102,6 +60955,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -58165,11 +61019,13 @@ "description": "Type of sharing for this shared-reservation", "enum": [ "LOCAL", + "ORGANIZATION", "SHARE_TYPE_UNSPECIFIED", "SPECIFIC_PROJECTS" ], "enumDescriptions": [ "Default value.", + "Shared-reservation is open to entire Organization", "Default value. This value is unused.", "Shared-reservation is open to specific projects" ], @@ -58274,6 +61130,20 @@ "description": "Represents a Persistent Disk Snapshot resource. You can use snapshots to back up data on a regular interval. For more information, read Creating persistent disk snapshots.", "id": "Snapshot", "properties": { + "architecture": { + "description": "[Output Only] The architecture of the snapshot. Valid values are ARM64 or X86_64.", + "enum": [ + "ARCHITECTURE_UNSPECIFIED", + "ARM64", + "X86_64" + ], + "enumDescriptions": [ + "Default value indicating Architecture is not set.", + "Machines with architecture ARM64", + "Machines with architecture X86_64" + ], + "type": "string" + }, "autoCreated": { "description": "[Output Only] Set to true if snapshots are automatically created by applying resource policy on the target disk.", "type": "boolean" @@ -58282,6 +61152,11 @@ "description": "Creates the new snapshot in the snapshot chain labeled with the specified name. The chain name must be 1-63 characters long and comply with RFC1035. This is an uncommon option only for advanced service owners who needs to create separate snapshot chains, for example, for chargeback tracking. When you describe your snapshot resource, this field is visible only if it has a non-empty value.", "type": "string" }, + "creationSizeBytes": { + "description": "[Output Only] Size in bytes of the snapshot at creation time.", + "format": "int64", + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -58363,6 +61238,18 @@ "$ref": "CustomerEncryptionKey", "description": "Encrypts the snapshot using a customer-supplied encryption key. After you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the snapshot later. For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request. Customer-supplied encryption keys do not protect access to metadata of the snapshot. If you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." }, + "snapshotType": { + "description": "Indicates the type of the snapshot.", + "enum": [ + "ARCHIVE", + "STANDARD" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, "sourceDisk": { "description": "The source disk used to create this snapshot.", "type": "string" @@ -58375,6 +61262,14 @@ "description": "[Output Only] The ID value of the disk used to create this snapshot. 
This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name.", "type": "string" }, + "sourceSnapshotSchedulePolicy": { + "description": "[Output Only] URL of the resource policy which created this scheduled snapshot.", + "type": "string" + }, + "sourceSnapshotSchedulePolicyId": { + "description": "[Output Only] ID of the resource policy which created this scheduled snapshot.", + "type": "string" + }, "status": { "description": "[Output Only] The status of the snapshot. This can be CREATING, DELETING, FAILED, READY, or UPLOADING.", "enum": [ @@ -58462,6 +61357,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -58490,6 +61386,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -58596,6 +61493,20 @@ }, "type": "array" }, + "keyRevocationActionType": { + "description": "KeyRevocationActionType of the instance. Supported options are \"STOP\" and \"NONE\". The default value is \"NONE\" if it is not specified.", + "enum": [ + "KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED", + "NONE", + "STOP" + ], + "enumDescriptions": [ + "Default value. This value is unused.", + "Indicates user chose no operation.", + "Indicates user chose to opt for VM shutdown on key revocation." + ], + "type": "string" + }, "labels": { "additionalProperties": { "type": "string" @@ -58768,6 +61679,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -58796,6 +61708,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -58886,6 +61799,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -58914,6 +61828,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -59060,6 +61975,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -59088,6 +62004,137 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "SslPoliciesAggregatedList": { + "id": "SslPoliciesAggregatedList", + "properties": { + "etag": { + "type": "string" + }, + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "SslPoliciesScopedList", + "description": "Name of the scope containing this set of SSL policies." + }, + "description": "A list of SslPoliciesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#sslPoliciesAggregatedList", + "description": "[Output Only] Type of resource. Always compute#sslPolicyAggregatedList for lists of SSL Policies.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -59177,6 +62224,121 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "SslPoliciesListAvailableFeaturesResponse": { + "id": "SslPoliciesListAvailableFeaturesResponse", + "properties": { + "features": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "SslPoliciesScopedList": { + "id": "SslPoliciesScopedList", + "properties": { + "sslPolicies": { + "description": "A list of SslPolicies contained in this scope.", + "items": { + "$ref": "SslPolicy" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of SSL policies when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -59205,6 +62367,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -59253,18 +62416,6 @@ }, "type": "object" }, - "SslPoliciesListAvailableFeaturesResponse": { - "id": "SslPoliciesListAvailableFeaturesResponse", - "properties": { - "features": { - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, "SslPolicy": { "description": "Represents an SSL Policy resource. Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.", "id": "SslPolicy", @@ -59341,6 +62492,10 @@ ], "type": "string" }, + "region": { + "description": "[Output Only] URL of the region where the regional SSL policy resides. This field is not applicable to global SSL policies.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -59360,6 +62515,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -59388,6 +62544,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -59506,7 +62663,7 @@ "type": "boolean" }, "externalIpv6Prefix": { - "description": "[Output Only] The external IPv6 address range that is assigned to this subnetwork.", + "description": "The external IPv6 address range that is owned by this subnetwork.", "type": "string" }, "fingerprint": { @@ -59704,6 +62861,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -59732,6 +62890,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -59822,6 +62981,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -59850,6 +63010,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -60008,6 +63169,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -60036,6 +63198,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -60115,25 +63278,25 @@ "id": "TCPHealthCheck", "properties": { "port": { - "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. The default value is 80. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, TCP health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. 
The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." ], "type": "string" }, @@ -60150,11 +63313,11 @@ "type": "string" }, "request": { - "description": "The application data to send once the TCP connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.", + "description": "Instructs the health check prober to send this exact ASCII string, up to 1024 bytes in length, after establishing the TCP connection.", "type": "string" }, "response": { - "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.", + "description": "Creates a content-based TCP health check. In addition to establishing a TCP connection, you can configure the health check to pass only when the backend sends this exact response ASCII string, up to 1024 bytes in length. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-ssl-tcp", "type": "string" } }, @@ -60277,6 +63440,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -60305,6 +63469,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -60377,6 +63542,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -60405,6 +63571,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -60584,6 +63751,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -60612,6 +63780,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -60684,6 +63853,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -60712,6 +63882,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -60940,6 +64111,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -60968,6 +64140,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -61058,6 +64231,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -61086,6 +64260,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -61166,7 +64341,7 @@ "type": "string" }, "natPolicy": { - "description": "NAT option controlling how IPs are NAT'ed to the instance. Currently only NO_NAT (default value) is supported.", + "description": "Must have a value of NO_NAT. Protocol forwarding delivers packets while preserving the destination IP address of the forwarding rule referencing the target instance.", "enum": [ "NO_NAT" ], @@ -61239,6 +64414,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -61267,6 +64443,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -61357,6 +64534,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -61385,6 +64563,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -61457,6 +64636,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -61485,6 +64665,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -61667,6 +64848,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -61695,6 +64877,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -61802,6 +64985,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -61830,6 +65014,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -61954,6 +65139,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -61982,6 +65168,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -62197,6 +65384,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -62225,6 +65413,109 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "TargetTcpProxiesScopedList": { + "id": "TargetTcpProxiesScopedList", + "properties": { + "targetTcpProxies": { + "description": "A list of TargetTcpProxies contained in this scope.", + "items": { + "$ref": "TargetTcpProxy" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of backend services when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -62344,6 +65635,10 @@ ], "type": "string" }, + "region": { + "description": "[Output Only] URL of the region where the regional TCP proxy resides. This field is not applicable to global TCP proxy.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -62355,6 +65650,133 @@ }, "type": "object" }, + "TargetTcpProxyAggregatedList": { + "id": "TargetTcpProxyAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "TargetTcpProxiesScopedList", + "description": "Name of the scope containing this set of TargetTcpProxies." + }, + "description": "A list of TargetTcpProxiesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#targetTcpProxyAggregatedList", + "description": "[Output Only] Type of resource. Always compute#targetTcpProxyAggregatedList for lists of Target TCP Proxies.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. 
We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "TargetTcpProxyList": { "description": "Contains a list of TargetTcpProxy resources.", "id": "TargetTcpProxyList", @@ -62397,6 +65819,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -62425,6 +65848,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -62604,6 +66028,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -62632,6 +66057,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -62722,6 +66148,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -62750,6 +66177,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -62822,6 +66250,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -62850,6 +66279,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -62995,7 +66425,7 @@ }, "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within defaultRouteAction. defaultRouteAction has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." + "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within defaultRouteAction. defaultRouteAction has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." 
}, "defaultService": { "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of defaultService, defaultUrlRedirect , or defaultRouteAction.weightedBackendService must be set. defaultService has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true.", @@ -63107,6 +66537,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -63135,6 +66566,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -63322,6 +66754,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -63350,6 +66783,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -63422,6 +66856,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -63450,6 +66885,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -63693,6 +67129,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -63721,6 +67158,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -63829,6 +67267,13 @@ "format": "int32", "type": "integer" }, + "ruleMappings": { + "description": "Information about mappings provided by rules in this NAT.", + "items": { + "$ref": "VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings" + }, + "type": "array" + }, "sourceAliasIpRange": { "description": "Alias IP range for this interface endpoint. It will be a private (RFC 1918) IP range. Examples: \"10.33.4.55/32\", or \"192.168.5.0/24\".", "type": "string" @@ -63840,6 +67285,42 @@ }, "type": "object" }, + "VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings": { + "description": "Contains information of NAT Mappings provided by a NAT Rule.", + "id": "VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings", + "properties": { + "drainNatIpPortRanges": { + "description": "List of all drain IP:port-range mappings assigned to this interface by this rule. These ranges are inclusive, that is, both the first and the last ports can be used for NAT. Example: [\"2.2.2.2:12345-12355\", \"1.1.1.1:2234-2234\"].", + "items": { + "type": "string" + }, + "type": "array" + }, + "natIpPortRanges": { + "description": "A list of all IP:port-range mappings assigned to this interface by this rule. These ranges are inclusive, that is, both the first and the last ports can be used for NAT. Example: [\"2.2.2.2:12345-12355\", \"1.1.1.1:2234-2234\"].", + "items": { + "type": "string" + }, + "type": "array" + }, + "numTotalDrainNatPorts": { + "description": "Total number of drain ports across all NAT IPs allocated to this interface by this rule. It equals the aggregated port number in the field drain_nat_ip_port_ranges.", + "format": "int32", + "type": "integer" + }, + "numTotalNatPorts": { + "description": "Total number of ports across all NAT IPs allocated to this interface by this rule. It equals the aggregated port number in the field nat_ip_port_ranges.", + "format": "int32", + "type": "integer" + }, + "ruleNumber": { + "description": "Rule number of the NAT Rule.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "VmEndpointNatMappingsList": { "description": "Contains a list of VmEndpointNatMappings.", "id": "VmEndpointNatMappingsList", @@ -63882,6 +67363,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -63910,6 +67392,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -64020,7 +67503,7 @@ "type": "string" }, "stackType": { - "description": "The stack type for this VPN gateway to identify the IP protocols that are enabled. 
If not specified, IPV4_ONLY will be used.", + "description": "The stack type for this VPN gateway to identify the IP protocols that are enabled. Possible values are: IPV4_ONLY, IPV4_IPV6. If not specified, IPV4_ONLY will be used.", "enum": [ "IPV4_IPV6", "IPV4_ONLY" @@ -64090,6 +67573,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -64118,6 +67602,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -64208,6 +67693,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -64236,6 +67722,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -64383,11 +67870,11 @@ "type": "integer" }, "interconnectAttachment": { - "description": "URL of the VLAN attachment (interconnectAttachment) resource for this VPN gateway interface. When the value of this field is present, the VPN gateway is used for IPsec-encrypted Cloud Interconnect; all egress or ingress traffic for this VPN gateway interface goes through the specified VLAN attachment resource. Not currently available publicly. ", + "description": "URL of the VLAN attachment (interconnectAttachment) resource for this VPN gateway interface. When the value of this field is present, the VPN gateway is used for HA VPN over Cloud Interconnect; all egress or ingress traffic for this VPN gateway interface goes through the specified VLAN attachment resource.", "type": "string" }, "ipAddress": { - "description": "[Output Only] IP address for this VPN interface associated with the VPN gateway. The IP address could be either a regional external IP address or a regional internal IP address. The two IP addresses for a VPN gateway must be all regional external or regional internal IP addresses. There cannot be a mix of regional external IP addresses and regional internal IP addresses. For IPsec-encrypted Cloud Interconnect, the IP addresses for both interfaces could either be regional internal IP addresses or regional external IP addresses. For regular (non IPsec-encrypted Cloud Interconnect) HA VPN tunnels, the IP address must be a regional external IP address.", + "description": "[Output Only] IP address for this VPN interface associated with the VPN gateway. The IP address could be either a regional external IP address or a regional internal IP address. 
The two IP addresses for a VPN gateway must be all regional external or regional internal IP addresses. There cannot be a mix of regional external IP addresses and regional internal IP addresses. For HA VPN over Cloud Interconnect, the IP addresses for both interfaces could either be regional internal IP addresses or regional external IP addresses. For regular (non HA VPN over Cloud Interconnect) HA VPN tunnels, the IP address must be a regional external IP address.", "type": "string" } }, @@ -64426,6 +67913,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -64454,6 +67942,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -64555,7 +68044,7 @@ "type": "string" }, "peerExternalGatewayInterface": { - "description": "The interface ID of the external VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created.", + "description": "The interface ID of the external VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created. Possible values are: `0`, `1`, `2`, `3`. The number of IDs in use depends on the external VPN gateway redundancy type.", "format": "int32", "type": "integer" }, @@ -64637,7 +68126,7 @@ "type": "string" }, "vpnGatewayInterface": { - "description": "The interface ID of the VPN gateway with which this VPN tunnel is associated.", + "description": "The interface ID of the VPN gateway with which this VPN tunnel is associated. Possible values are: `0`, `1`.", "format": "int32", "type": "integer" } @@ -64693,6 +68182,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -64721,6 +68211,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -64811,6 +68302,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -64839,6 +68331,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -64911,6 +68404,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -64939,6 +68433,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -65082,6 +68577,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -65110,6 +68606,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", @@ -65287,6 +68784,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", @@ -65315,6 +68813,7 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 95e7fef54278d..3d27b1a03596c 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -8,35 +8,35 @@ // // For product documentation, see: https://cloud.google.com/compute/ // -// Creating a client +// # Creating a client // // Usage example: // -// import "google.golang.org/api/compute/v1" -// ... -// ctx := context.Background() -// computeService, err := compute.NewService(ctx) +// import "google.golang.org/api/compute/v1" +// ... 
+// ctx := context.Background() +// computeService, err := compute.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // -// Other authentication options +// # Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // -// computeService, err := compute.NewService(ctx, option.WithScopes(compute.DevstorageReadWriteScope)) +// computeService, err := compute.NewService(ctx, option.WithScopes(compute.DevstorageReadWriteScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // -// computeService, err := compute.NewService(ctx, option.WithAPIKey("AIza...")) +// computeService, err := compute.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// computeService, err := compute.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// computeService, err := compute.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package compute // import "google.golang.org/api/compute/v1" @@ -177,6 +177,7 @@ func New(client *http.Client) (*Service, error) { s.Licenses = NewLicensesService(s) s.MachineImages = NewMachineImagesService(s) s.MachineTypes = NewMachineTypesService(s) + s.NetworkAttachments = NewNetworkAttachmentsService(s) s.NetworkEdgeSecurityServices = NewNetworkEdgeSecurityServicesService(s) s.NetworkEndpointGroups = NewNetworkEndpointGroupsService(s) s.NetworkFirewallPolicies = NewNetworkFirewallPoliciesService(s) @@ -204,8 +205,10 @@ func New(client *http.Client) (*Service, error) { s.RegionOperations = NewRegionOperationsService(s) s.RegionSecurityPolicies = NewRegionSecurityPoliciesService(s) s.RegionSslCertificates = NewRegionSslCertificatesService(s) + s.RegionSslPolicies = NewRegionSslPoliciesService(s) s.RegionTargetHttpProxies = NewRegionTargetHttpProxiesService(s) s.RegionTargetHttpsProxies = NewRegionTargetHttpsProxiesService(s) + s.RegionTargetTcpProxies = NewRegionTargetTcpProxiesService(s) s.RegionUrlMaps = NewRegionUrlMapsService(s) s.Regions = NewRegionsService(s) s.Reservations = NewReservationsService(s) @@ -305,6 +308,8 @@ type Service struct { MachineTypes *MachineTypesService + NetworkAttachments *NetworkAttachmentsService + NetworkEdgeSecurityServices *NetworkEdgeSecurityServicesService NetworkEndpointGroups *NetworkEndpointGroupsService @@ -359,10 +364,14 @@ type Service struct { RegionSslCertificates *RegionSslCertificatesService + RegionSslPolicies *RegionSslPoliciesService + RegionTargetHttpProxies *RegionTargetHttpProxiesService RegionTargetHttpsProxies *RegionTargetHttpsProxiesService + RegionTargetTcpProxies *RegionTargetTcpProxiesService + RegionUrlMaps *RegionUrlMapsService Regions *RegionsService @@ -718,6 +727,15 @@ type MachineTypesService struct { s *Service } +func NewNetworkAttachmentsService(s *Service) *NetworkAttachmentsService { + rs := &NetworkAttachmentsService{s: s} + 
return rs +} + +type NetworkAttachmentsService struct { + s *Service +} + func NewNetworkEdgeSecurityServicesService(s *Service) *NetworkEdgeSecurityServicesService { rs := &NetworkEdgeSecurityServicesService{s: s} return rs @@ -961,6 +979,15 @@ type RegionSslCertificatesService struct { s *Service } +func NewRegionSslPoliciesService(s *Service) *RegionSslPoliciesService { + rs := &RegionSslPoliciesService{s: s} + return rs +} + +type RegionSslPoliciesService struct { + s *Service +} + func NewRegionTargetHttpProxiesService(s *Service) *RegionTargetHttpProxiesService { rs := &RegionTargetHttpProxiesService{s: s} return rs @@ -979,6 +1006,15 @@ type RegionTargetHttpsProxiesService struct { s *Service } +func NewRegionTargetTcpProxiesService(s *Service) *RegionTargetTcpProxiesService { + rs := &RegionTargetTcpProxiesService{s: s} + return rs +} + +type RegionTargetTcpProxiesService struct { + s *Service +} + func NewRegionUrlMapsService(s *Service) *RegionUrlMapsService { rs := &RegionUrlMapsService{s: s} return rs @@ -1395,6 +1431,9 @@ type AcceleratorTypeAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -1582,6 +1621,9 @@ type AcceleratorTypeListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -1748,6 +1790,9 @@ type AcceleratorTypesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -1993,6 +2038,16 @@ type Address struct { // "UNSPECIFIED_VERSION" IpVersion string `json:"ipVersion,omitempty"` + // Ipv6EndpointType: The endpoint type of this address, which should be + // VM or NETLB. This is used for deciding which type of endpoint this + // address can be used after the external IPv6 address reservation. + // + // Possible values: + // "NETLB" - Reserved IPv6 address can be used on network load + // balancer. + // "VM" - Reserved IPv6 address can be used on VM. + Ipv6EndpointType string `json:"ipv6EndpointType,omitempty"` + // Kind: [Output Only] Type of the resource. Always compute#address for // addresses. 
Kind string `json:"kind,omitempty"` @@ -2043,20 +2098,20 @@ type Address struct { // NAT_AUTO for the regional external IP addresses used by Cloud NAT // when allocating addresses using automatic NAT IP address allocation. // - IPSEC_INTERCONNECT for addresses created from a private IP range - // that are reserved for a VLAN attachment in an *IPsec-encrypted Cloud + // that are reserved for a VLAN attachment in an *HA VPN over Cloud // Interconnect* configuration. These addresses are regional resources. - // Not currently available publicly. - `SHARED_LOADBALANCER_VIP` for an - // internal IP address that is assigned to multiple internal forwarding - // rules. - `PRIVATE_SERVICE_CONNECT` for a private network address that - // is used to configure Private Service Connect. Only global internal - // addresses can use this purpose. + // - `SHARED_LOADBALANCER_VIP` for an internal IP address that is + // assigned to multiple internal forwarding rules. - + // `PRIVATE_SERVICE_CONNECT` for a private network address that is used + // to configure Private Service Connect. Only global internal addresses + // can use this purpose. // // Possible values: // "DNS_RESOLVER" - DNS resolver address in the subnetwork. // "GCE_ENDPOINT" - VM internal/alias IP, Internal LB service IP, etc. // "IPSEC_INTERCONNECT" - A regional internal IP address range - // reserved for the VLAN attachment that is used in IPsec-encrypted - // Cloud Interconnect. This regional internal IP address range must not + // reserved for the VLAN attachment that is used in HA VPN over Cloud + // Interconnect. This regional internal IP address range must not // overlap with any IP address range of subnet/route in the VPC network // and its peering networks. After the VLAN attachment is created with // the reserved IP address range, when creating a new VPN gateway, its @@ -2212,6 +2267,9 @@ type AddressAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -2398,6 +2456,9 @@ type AddressListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -2562,6 +2623,9 @@ type AddressesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -2692,6 +2756,13 @@ type AdvancedMachineFeatures struct { // processor is assumed. ThreadsPerCore int64 `json:"threadsPerCore,omitempty"` + // VisibleCoreCount: The number of physical cores to expose to an + // instance. Multiply by the number of threads per core to compute the + // total number of virtual CPUs to expose to the instance. If unset, the + // number of cores is inferred from the instance's nominal CPU count and + // the underlying platform's SMT width. + VisibleCoreCount int64 `json:"visibleCoreCount,omitempty"` + // ForceSendFields is a list of field names (e.g. // "EnableNestedVirtualization") to unconditionally include in API // requests. By default, fields with empty or default values are omitted @@ -2882,6 +2953,16 @@ func (s *AllocationSpecificSKUReservation) MarshalJSON() ([]byte, error) { // AttachedDisk: An instance-attached disk resource. type AttachedDisk struct { + // Architecture: [Output Only] The architecture of the attached disk. + // Valid values are ARM64 or X86_64. + // + // Possible values: + // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture + // is not set. + // "ARM64" - Machines with architecture ARM64 + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + // AutoDelete: Specifies whether the disk will be auto-deleted when the // instance is deleted (but not when the disk is detached from the // instance). @@ -2921,6 +3002,11 @@ type AttachedDisk struct { // DiskSizeGb: The size of the disk in GB. DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + // ForceAttach: [Input Only] Whether to force attach the regional disk + // even if it's currently attached to another instance. If you try to + // force attach a zonal disk to an instance, you will receive an error. + ForceAttach bool `json:"forceAttach,omitempty"` + // GuestOsFeatures: A list of features to enable on the guest operating // system. Applicable only for bootable images. Read Enabling guest // operating system features to see a list of available options. @@ -2939,11 +3025,10 @@ type AttachedDisk struct { InitializeParams *AttachedDiskInitializeParams `json:"initializeParams,omitempty"` // Interface: Specifies the disk interface to use for attaching this - // disk, which is either SCSI or NVME. The default is SCSI. Persistent - // disks must always use SCSI and the request will fail if you attempt - // to attach a persistent disk in any other format than SCSI. Local SSDs - // can use either NVME or SCSI. For performance characteristics of SCSI - // over NVMe, see Local SSD performance. + // disk, which is either SCSI or NVME. For most machine types, the + // default is SCSI. Local SSDs can use either NVME or SCSI. In certain + // configurations, persistent disks can use NVMe. For more information, + // see About persistent disks. // // Possible values: // "NVME" @@ -2991,7 +3076,7 @@ type AttachedDisk struct { // "SCRATCH" Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoDelete") to + // ForceSendFields is a list of field names (e.g. "Architecture") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -2999,10 +3084,10 @@ type AttachedDisk struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoDelete") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Architecture") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -3022,6 +3107,16 @@ func (s *AttachedDisk) MarshalJSON() ([]byte, error) { // property is mutually exclusive with the source property; you can only // define one or the other, but not both. type AttachedDiskInitializeParams struct { + // Architecture: The architecture of the attached disk. Valid values are + // arm64 or x86_64. + // + // Possible values: + // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture + // is not set. + // "ARM64" - Machines with architecture ARM64 + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + // Description: An optional description. Provide this property when // creating the disk. Description string `json:"description,omitempty"` @@ -3044,13 +3139,15 @@ type AttachedDiskInitializeParams struct { // URL. For example: // https://www.googleapis.com/compute/v1/projects/project/zones/zone // /diskTypes/pd-standard For a full list of acceptable values, see - // Persistent disk types. If you define this field, you can provide - // either the full or partial URL. For example, the following are valid - // values: - + // Persistent disk types. If you specify this field when creating a VM, + // you can provide either the full or partial URL. For example, the + // following values are valid: - // https://www.googleapis.com/compute/v1/projects/project/zones/zone // /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - // - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this - // is the name of the disk type, not URL. + // - zones/zone/diskTypes/diskType If you specify this field when + // creating or updating an instance template or all-instances + // configuration, specify the type of the disk, not the URL. For + // example: pd-standard. DiskType string `json:"diskType,omitempty"` // Labels: Labels to apply to this disk. These can be later modified by @@ -3080,6 +3177,13 @@ type AttachedDiskInitializeParams struct { // see the Extreme persistent disk documentation. ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + // ResourceManagerTags: Resource manager tags to be bound to the disk. + // Tag keys and values have the same definition as resource manager + // tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values + // are in the format `tagValues/456`. The field is ignored (both PUT & + // PATCH) when empty. 
+ ResourceManagerTags map[string]string `json:"resourceManagerTags,omitempty"` + // ResourcePolicies: Resource policies applied to this disk for // automatic snapshot creations. Specified using the full or partial // URL. For instance template, specify only the resource policy name. @@ -3104,10 +3208,10 @@ type AttachedDiskInitializeParams struct { // SourceImageEncryptionKey: The customer-supplied encryption key of the // source image. Required if the source image is protected by a - // customer-supplied encryption key. Instance templates do not store - // customer-supplied encryption keys, so you cannot create disks for - // instances in a managed instance group if the source images are - // encrypted with your own keys. + // customer-supplied encryption key. InstanceTemplate and + // InstancePropertiesPatch do not store customer-supplied encryption + // keys, so you cannot create disks for instances in a managed instance + // group if the source images are encrypted with your own keys. SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` // SourceSnapshot: The source snapshot to create this disk. When @@ -3122,7 +3226,7 @@ type AttachedDiskInitializeParams struct { // the source snapshot. SourceSnapshotEncryptionKey *CustomerEncryptionKey `json:"sourceSnapshotEncryptionKey,omitempty"` - // ForceSendFields is a list of field names (e.g. "Description") to + // ForceSendFields is a list of field names (e.g. "Architecture") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -3130,7 +3234,7 @@ type AttachedDiskInitializeParams struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include + // NullFields is a list of field names (e.g. "Architecture") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -3486,6 +3590,9 @@ type AutoscalerAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -3672,6 +3779,9 @@ type AutoscalerListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -3952,6 +4062,9 @@ type AutoscalersScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -4504,7 +4617,9 @@ type Backend struct { // offering 0% of its available capacity. The valid ranges are 0.0 and // [0.1,1.0]. You cannot configure a setting larger than 0 and smaller // than 0.1. You cannot configure a setting of 0 when there is only one - // backend attached to the backend service. + // backend attached to the backend service. Not available with backends + // that don't support using a balancingMode. This includes backends such + // as global internet NEGs, regional serverless NEGs, and PSC NEGs. CapacityScaler float64 `json:"capacityScaler,omitempty"` // Description: An optional description of this resource. Provide this @@ -4562,7 +4677,7 @@ type Backend struct { MaxRatePerInstance float64 `json:"maxRatePerInstance,omitempty"` // MaxUtilization: Optional parameter to define a target capacity for - // the UTILIZATIONbalancing mode. The valid range is [0.0, 1.0]. For + // the UTILIZATION balancing mode. The valid range is [0.0, 1.0]. For // usage guidelines, see Utilization balancing mode. MaxUtilization float64 `json:"maxUtilization,omitempty"` @@ -4619,6 +4734,16 @@ type BackendBucket struct { // CdnPolicy: Cloud CDN configuration for this BackendBucket. CdnPolicy *BackendBucketCdnPolicy `json:"cdnPolicy,omitempty"` + // CompressionMode: Compress text responses using Brotli or gzip + // compression, based on the client's Accept-Encoding header. + // + // Possible values: + // "AUTOMATIC" - Automatically uses the best compression based on the + // Accept-Encoding header sent by the client. + // "DISABLED" - Disables compression. Existing compressed responses + // cached by Cloud CDN will not be served to clients. + CompressionMode string `json:"compressionMode,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -5030,6 +5155,9 @@ type BackendBucketListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -5172,6 +5300,16 @@ type BackendService struct { CircuitBreakers *CircuitBreakers `json:"circuitBreakers,omitempty"` + // CompressionMode: Compress text responses using Brotli or gzip + // compression, based on the client's Accept-Encoding header. + // + // Possible values: + // "AUTOMATIC" - Automatically uses the best compression based on the + // Accept-Encoding header sent by the client. + // "DISABLED" - Disables compression. Existing compressed responses + // cached by Cloud CDN will not be served to clients. 
+ CompressionMode string `json:"compressionMode,omitempty"` + ConnectionDraining *ConnectionDraining `json:"connectionDraining,omitempty"` // ConnectionTrackingPolicy: Connection Tracking configuration for this @@ -5371,11 +5509,9 @@ type BackendService struct { // hosts from the load balancing pool for the backend service. If not // set, this feature is considered disabled. This field is applicable to // either: - A regional backend service with the service_protocol set to - // HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to + // HTTP, HTTPS, HTTP2, or GRPC, and load_balancing_scheme set to // INTERNAL_MANAGED. - A global backend service with the - // load_balancing_scheme set to INTERNAL_SELF_MANAGED. Not supported - // when the backend service is referenced by a URL map that is bound to - // target gRPC proxy that has validateForProxyless field set to true. + // load_balancing_scheme set to INTERNAL_SELF_MANAGED. OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` // Port: Deprecated in favor of portName. The TCP port to connect on the @@ -5594,6 +5730,9 @@ type BackendServiceAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -6261,6 +6400,9 @@ type BackendServiceListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -6499,15 +6641,15 @@ func (s *BackendServiceLocalityLoadBalancingPolicyConfigPolicy) MarshalJSON() ([ // BackendServiceLogConfig: The available logging options for the load // balancer traffic served by this backend service. type BackendServiceLogConfig struct { - // Enable: This field denotes whether to enable logging for the load - // balancer traffic served by this backend service. + // Enable: Denotes whether to enable logging for the load balancer + // traffic served by this backend service. The default value is false. Enable bool `json:"enable,omitempty"` // SampleRate: This field can only be specified if logging is enabled // for this backend service. The value of the field must be in [0, 1]. // This configures the sampling rate of requests to the load balancer // where 1.0 means all logged requests are reported and 0.0 means no - // logged requests are reported. The default value is 0.0. + // logged requests are reported. The default value is 1.0. SampleRate float64 `json:"sampleRate,omitempty"` // ForceSendFields is a list of field names (e.g. "Enable") to @@ -6630,6 +6772,9 @@ type BackendServicesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -7005,19 +7150,24 @@ type Binding struct { // `allUsers`: A special identifier that represents anyone who is on the // internet; with or without a Google account. * // `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. * - // `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . * - // `serviceAccount:{emailid}`: An email address that represents a - // service account. For example, - // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An - // email address that represents a Google group. For example, - // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An - // email address (plus unique identifier) representing a user that has - // been recently deleted. For example, - // `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered - // user retains the role in the binding. * + // who is authenticated with a Google account or a service account. Does + // not include identities that come from external identity providers + // (IdPs) through identity federation. * `user:{emailid}`: An email + // address that represents a specific Google account. For example, + // `alice@example.com` . * `serviceAccount:{emailid}`: An email address + // that represents a Google service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * + // `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: + // An identifier for a Kubernetes service account + // (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). + // For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. + // * `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. * + // `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a user that has been recently + // deleted. For example, `alice@example.com?uid=123456789012345678901`. + // If the user is recovered, this value reverts to `user:{emailid}` and + // the recovered user retains the role in the binding. * // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address // (plus unique identifier) representing a service account that has been // recently deleted. For example, @@ -7073,7 +7223,8 @@ type BulkInsertInstanceResource struct { // to be created. Required if sourceInstanceTemplate is not provided. InstanceProperties *InstanceProperties `json:"instanceProperties,omitempty"` - // LocationPolicy: Policy for chosing target zone. + // LocationPolicy: Policy for chosing target zone. For more information, + // see Create VMs in bulk . LocationPolicy *LocationPolicy `json:"locationPolicy,omitempty"` // MinCount: The minimum number of instances to create. If no min_count @@ -7368,6 +7519,10 @@ type Commitment struct { // license commitment. LicenseResource *LicenseResourceCommitment `json:"licenseResource,omitempty"` + // MergeSourceCommitments: List of source commitments to be merged into + // a new commitment. 
+ MergeSourceCommitments []string `json:"mergeSourceCommitments,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -7401,6 +7556,10 @@ type Commitment struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SplitSourceCommitment: Source commitment to be splitted into a new + // commitment. + SplitSourceCommitment string `json:"splitSourceCommitment,omitempty"` + // StartTimestamp: [Output Only] Commitment start time in RFC3339 text // format. StartTimestamp string `json:"startTimestamp,omitempty"` @@ -7411,6 +7570,7 @@ type Commitment struct { // // Possible values: // "ACTIVE" + // "CANCELLED" // "CREATING" // "EXPIRED" // "NOT_YET_ACTIVE" @@ -7547,6 +7707,9 @@ type CommitmentAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -7733,6 +7896,9 @@ type CommitmentListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -7898,6 +8064,9 @@ type CommitmentsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -8243,7 +8412,9 @@ type CorsPolicy struct { // AllowOriginRegexes: Specifies a regular expression that matches // allowed origins. For more information about the regular expression // syntax, see Syntax. An origin is allowed if it matches either an item - // in allowOrigins or an item in allowOriginRegexes. + // in allowOrigins or an item in allowOriginRegexes. Regular expressions + // can only be used when the loadBalancingScheme is set to + // INTERNAL_SELF_MANAGED. AllowOriginRegexes []string `json:"allowOriginRegexes,omitempty"` // AllowOrigins: Specifies the list of origins that is allowed to do @@ -8461,6 +8632,16 @@ func (s *DeprecationStatus) MarshalJSON() ([]byte, error) { // persistent disks. The regionDisks resource represents a regional // persistent disk. For more information, read Regional resources. type Disk struct { + // Architecture: The architecture of the disk. Valid values are ARM64 or + // X86_64. 
+ // + // Possible values: + // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture + // is not set. + // "ARM64" - Machines with architecture ARM64 + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -8548,6 +8729,10 @@ type Disk struct { // Options: Internal use only. Options string `json:"options,omitempty"` + // Params: Input only. [Input Only] Additional params passed with the + // request, but not persisted as part of resource payload. + Params *DiskParams `json:"params,omitempty"` + // PhysicalBlockSizeBytes: Physical block size of the persistent disk, // in bytes. If not present in a request, a default value is used. The // currently supported size is 4096, other sizes may be added in the @@ -8699,21 +8884,20 @@ type Disk struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Architecture") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Architecture") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -8803,6 +8987,9 @@ type DiskAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -9069,6 +9256,9 @@ type DiskListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -9217,6 +9407,39 @@ func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskParams: Additional disk params. +type DiskParams struct { + // ResourceManagerTags: Resource manager tags to be bound to the disk. + // Tag keys and values have the same definition as resource manager + // tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values + // are in the format `tagValues/456`. The field is ignored (both PUT & + // PATCH) when empty. + ResourceManagerTags map[string]string `json:"resourceManagerTags,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ResourceManagerTags") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ResourceManagerTags") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DiskParams) MarshalJSON() ([]byte, error) { + type NoMethod DiskParams + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskType: Represents a Disk Type resource. Google Compute Engine has // two Disk Type resources: * Regional // (/compute/docs/reference/rest/v1/regionDiskTypes) * Zonal @@ -9380,6 +9603,9 @@ type DiskTypeAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -9566,6 +9792,9 @@ type DiskTypeListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -9731,6 +9960,9 @@ type DiskTypesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -9980,6 +10212,9 @@ type DisksScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -10233,6 +10468,65 @@ func (s *Duration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ErrorInfo: Describes the cause of the error with structured details. +// Example of an error when contacting the "pubsub.googleapis.com" API +// when it is not enabled: { "reason": "API_DISABLED" "domain": +// "googleapis.com" "metadata": { "resource": "projects/123", "service": +// "pubsub.googleapis.com" } } This response indicates that the +// pubsub.googleapis.com API is not enabled. Example of an error that is +// returned when attempting to create a Spanner instance in a region +// that is out of stock: { "reason": "STOCKOUT" "domain": +// "spanner.googleapis.com", "metadata": { "availableRegions": +// "us-central1,us-east2" } } +type ErrorInfo struct { + // Domain: The logical grouping to which the "reason" belongs. The error + // domain is typically the registered service name of the tool or + // product that generates the error. Example: "pubsub.googleapis.com". + // If the error is generated by some common infrastructure, the error + // domain must be a globally unique value that identifies the + // infrastructure. For Google API infrastructure, the error domain is + // "googleapis.com". + Domain string `json:"domain,omitempty"` + + // Metadatas: Additional structured details about this error. Keys + // should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the + // units should be contained in the key, not the value. For example, + // rather than {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number + // of instances that can be created in a single (batch) request. + Metadatas map[string]string `json:"metadatas,omitempty"` + + // Reason: The reason of the error. This is a constant value that + // identifies the proximate cause of the error. Error reasons are unique + // within a particular domain of errors. This should be at most 63 + // characters and match a regular expression of `A-Z+[A-Z0-9]`, which + // represents UPPER_SNAKE_CASE. + Reason string `json:"reason,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Domain") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Domain") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ErrorInfo) MarshalJSON() ([]byte, error) { + type NoMethod ErrorInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ExchangedPeeringRoute struct { // DestRange: The destination range of the route. DestRange string `json:"destRange,omitempty"` @@ -10361,6 +10655,9 @@ type ExchangedPeeringRoutesListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -10767,6 +11064,9 @@ type ExternalVpnGatewayListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -10940,10 +11240,8 @@ type Firewall struct { DestinationRanges []string `json:"destinationRanges,omitempty"` // Direction: Direction of traffic to which this firewall applies, - // either `INGRESS` or `EGRESS`. The default is `INGRESS`. For `INGRESS` - // traffic, you cannot specify the destinationRanges field, and for - // `EGRESS` traffic, you cannot specify the sourceRanges or sourceTags - // fields. + // either `INGRESS` or `EGRESS`. The default is `INGRESS`. For `EGRESS` + // traffic, you cannot specify the sourceTags fields. // // Possible values: // "EGRESS" - Indicates that firewall should apply to outgoing @@ -11235,6 +11533,9 @@ type FirewallListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -11435,9 +11736,10 @@ type FirewallPolicy struct { // DisplayName: Deprecated, please use short name instead. User-provided // name of the Organization firewall policy. The name should be unique // in the organization in which the firewall policy is created. This - // name must be set on creation and cannot be changed. The name must be - // 1-63 characters long, and comply with RFC1035. 
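ErrorInfo, added in the hunk above, carries the structured error details (reason, domain, metadata) described in its comment. The sketch below round-trips the API_DISABLED example from that comment with placeholder values, again assuming the vendored compute/v1 package.

```go
package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	info := &compute.ErrorInfo{
		Reason: "API_DISABLED",
		Domain: "googleapis.com",
		Metadatas: map[string]string{
			"resource": "projects/123",
			"service":  "pubsub.googleapis.com",
		},
	}
	// Per the field docs, units belong in the metadata key, e.g.
	// {"instanceLimitPerRequest": "100"}, not in the value.
	b, err := json.MarshalIndent(info, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```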
Specifically, the name - // must be 1-63 characters long and match the regular expression + // field is not applicable to network firewall policies. This name must + // be set on creation and cannot be changed. The name must be 1-63 + // characters long, and comply with RFC1035. Specifically, the name must + // be 1-63 characters long and match the regular expression // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be // a lowercase letter, and all following characters must be a dash, // lowercase letter, or digit, except the last character, which cannot @@ -11462,11 +11764,13 @@ type FirewallPolicy struct { // compute#firewallPolicyfor firewall policies Kind string `json:"kind,omitempty"` - // Name: [Output Only] Name of the resource. It is a numeric ID - // allocated by GCP which uniquely identifies the Firewall Policy. + // Name: Name of the resource. For Organization Firewall Policies it's a + // [Output Only] numeric ID allocated by Google Cloud which uniquely + // identifies the Organization Firewall Policy. Name string `json:"name,omitempty"` - // Parent: [Output Only] The parent of the firewall policy. + // Parent: [Output Only] The parent of the firewall policy. This field + // is not applicable to network firewall policies. Parent string `json:"parent,omitempty"` // Region: [Output Only] URL of the region where the regional firewall @@ -11492,15 +11796,16 @@ type FirewallPolicy struct { // with the resource id. SelfLinkWithId string `json:"selfLinkWithId,omitempty"` - // ShortName: User-provided name of the Organization firewall plicy. The - // name should be unique in the organization in which the firewall - // policy is created. This name must be set on creation and cannot be - // changed. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // ShortName: User-provided name of the Organization firewall policy. + // The name should be unique in the organization in which the firewall + // policy is created. This field is not applicable to network firewall + // policies. This name must be set on creation and cannot be changed. + // The name must be 1-63 characters long, and comply with RFC1035. + // Specifically, the name must be 1-63 characters long and match the + // regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first + // character must be a lowercase letter, and all following characters + // must be a dash, lowercase letter, or digit, except the last + // character, which cannot be a dash. ShortName string `json:"shortName,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -11651,6 +11956,9 @@ type FirewallPolicyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -12046,10 +12354,11 @@ type ForwardingRule struct { // an existing static (reserved) IP address resource. When omitted, // Google Cloud assigns an ephemeral IP address. Use one of the // following formats to specify an IP address while creating a - // forwarding rule: * IP address number, as in `100.1.2.3` * Full - // resource URL, as in - // https://www.googleapis.com/compute/v1/projects/project_id/regions/region - // /addresses/address-name * Partial URL or by name, as in: - + // forwarding rule: * IP address number, as in `100.1.2.3` * IPv6 + // address range, as in `2600:1234::/96` * Full resource URL, as in + // https://www.googleapis.com/compute/v1/projects/ + // project_id/regions/region/addresses/address-name * Partial URL or by + // name, as in: - // projects/project_id/regions/region/addresses/address-name - // regions/region/addresses/address-name - global/addresses/address-name // - address-name The forwarding rule's target or backendService, and in @@ -12077,12 +12386,14 @@ type ForwardingRule struct { // "UDP" IPProtocol string `json:"IPProtocol,omitempty"` - // AllPorts: This field is used along with the backend_service field for - // Internal TCP/UDP Load Balancing or Network Load Balancing, or with - // the target field for internal and external TargetInstance. You can - // only use one of ports and port_range, or allPorts. The three are - // mutually exclusive. For TCP, UDP and SCTP traffic, packets addressed - // to any ports will be forwarded to the target or backendService. + // AllPorts: This field can only be used: - If IPProtocol is one of TCP, + // UDP, or SCTP. - By internal TCP/UDP load balancers, backend + // service-based network load balancers, and internal and external + // protocol forwarding. Set this field to true to allow packets + // addressed to any port or packets lacking destination port information + // (for example, UDP fragments after the first fragment) to be forwarded + // to the backends configured with this forwarding rule. The ports, + // port_range, and allPorts fields are mutually exclusive. AllPorts bool `json:"allPorts,omitempty"` // AllowGlobalAccess: This field is used along with the backend_service @@ -12120,8 +12431,7 @@ type ForwardingRule struct { Id uint64 `json:"id,omitempty,string"` // IpVersion: The IP Version that will be used by this forwarding rule. - // Valid options are IPV4 or IPV6. This can only be specified for an - // external global forwarding rule. + // Valid options are IPV4 or IPV6. // // Possible values: // "IPV4" @@ -12232,28 +12542,37 @@ type ForwardingRule struct { // Non-PSC forwarding rules do not use this field. NoAutomateDnsZone bool `json:"noAutomateDnsZone,omitempty"` - // PortRange: This field can be used only if: - Load balancing scheme is - // one of EXTERNAL, INTERNAL_SELF_MANAGED or INTERNAL_MANAGED - - // IPProtocol is one of TCP, UDP, or SCTP. Packets addressed to ports in - // the specified range will be forwarded to target or backend_service. - // You can only use one of ports, port_range, or allPorts. The three are - // mutually exclusive. Forwarding rules with the same [IPAddress, - // IPProtocol] pair must have disjoint ports. Some types of forwarding - // target have constraints on the acceptable ports. 
For more - // information, see Port specifications - // (https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications). - // @pattern: \\d+(?:-\\d+)? + // PortRange: This field can only be used: - If IPProtocol is one of + // TCP, UDP, or SCTP. - By backend service-based network load balancers, + // target pool-based network load balancers, internal proxy load + // balancers, external proxy load balancers, Traffic Director, external + // protocol forwarding, and Classic VPN. Some products have restrictions + // on what ports can be used. See port specifications for details. Only + // packets addressed to ports in the specified range will be forwarded + // to the backends configured with this forwarding rule. The ports, + // port_range, and allPorts fields are mutually exclusive. For external + // forwarding rules, two or more forwarding rules cannot use the same + // [IPAddress, IPProtocol] pair, and cannot have overlapping portRanges. + // For internal forwarding rules within the same VPC network, two or + // more forwarding rules cannot use the same [IPAddress, IPProtocol] + // pair, and cannot have overlapping portRanges. @pattern: + // \\d+(?:-\\d+)? PortRange string `json:"portRange,omitempty"` - // Ports: The ports field is only supported when the forwarding rule - // references a backend_service directly. Only packets addressed to the - // specified list of ports - // ((https://cloud.google.com/load-balancing/docs/forwarding-rule-concept - // s#port_specifications)) are forwarded to backends. You can only use - // one of ports and port_range, or allPorts. The three are mutually - // exclusive. You can specify a list of up to five ports, which can be - // non-contiguous. Forwarding rules with the same [IPAddress, - // IPProtocol] pair must have disjoint ports. @pattern: \\d+(?:-\\d+)? + // Ports: This field can only be used: - If IPProtocol is one of TCP, + // UDP, or SCTP. - By internal TCP/UDP load balancers, backend + // service-based network load balancers, and internal protocol + // forwarding. You can specify a list of up to five ports by number, + // separated by commas. The ports can be contiguous or discontiguous. + // Only packets addressed to these ports will be forwarded to the + // backends configured with this forwarding rule. For external + // forwarding rules, two or more forwarding rules cannot use the same + // [IPAddress, IPProtocol] pair, and cannot share any values defined in + // ports. For internal forwarding rules within the same VPC network, two + // or more forwarding rules cannot use the same [IPAddress, IPProtocol] + // pair, and cannot share any values defined in ports. The ports, + // port_range, and allPorts fields are mutually exclusive. @pattern: + // \\d+(?:-\\d+)? Ports []string `json:"ports,omitempty"` // PscConnectionId: [Output Only] The PSC connection id of the PSC @@ -12264,6 +12583,9 @@ type ForwardingRule struct { // "ACCEPTED" - The connection has been accepted by the producer. // "CLOSED" - The connection has been closed by the producer and will // not serve traffic going forward. + // "NEEDS_ATTENTION" - The connection has been accepted by the + // producer, but the producer needs to take further action before the + // forwarding rule can serve traffic. // "PENDING" - The connection is pending acceptance by the producer. // "REJECTED" - The connection has been rejected by the producer. 
// "STATUS_UNSPECIFIED" @@ -12307,6 +12629,19 @@ type ForwardingRule struct { // subnet mode or when creating external forwarding rule with IPv6. Subnetwork string `json:"subnetwork,omitempty"` + // Target: The URL of the target resource to receive the matched + // traffic. For regional forwarding rules, this target must be in the + // same region as the forwarding rule. For global forwarding rules, this + // target must be a global load balancing resource. The forwarded + // traffic must be of a type appropriate to the target object. - For + // load balancers, see the "Target" column in Port specifications + // (https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). + // - For Private Service Connect forwarding rules that forward traffic + // to Google APIs, provide the name of a supported Google API bundle: - + // vpc-sc - APIs that support VPC Service Controls. - all-apis - All + // supported Google APIs. - For Private Service Connect forwarding rules + // that forward traffic to managed services, the target must be a + // service attachment. Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -12416,6 +12751,9 @@ type ForwardingRuleAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -12602,6 +12940,9 @@ type ForwardingRuleListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -12837,6 +13178,9 @@ type ForwardingRulesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -12955,35 +13299,45 @@ type GRPCHealthCheck struct { // The grpc_service_name can only be ASCII. GrpcServiceName string `json:"grpcServiceName,omitempty"` - // Port: The port number for the health check request. Must be specified - // if port_name and port_specification are not set or if - // port_specification is USE_FIXED_PORT. Valid values are 1 through - // 65535. + // Port: The TCP port number to which the health check prober sends + // packets. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. 
If - // both port and port_name are defined, port takes precedence. The - // port_name should conform to RFC1035. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, gRPC health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ForceSendFields is a list of field names (e.g. "GrpcServiceName") to @@ -13338,36 +13692,52 @@ func (s *GuestOsFeature) MarshalJSON() ([]byte, error) { type HTTP2HealthCheck struct { // Host: The value of the host header in the HTTP/2 health check - // request. 
If left empty (default value), the IP on behalf of which - // this health check is performed will be used. + // request. If left empty (default value), the host header is set to the + // destination IP address to which health check packets are sent. The + // destination IP address depends on the type of load balancer. For + // details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest Host string `json:"host,omitempty"` - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 443. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, HTTP2 health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. 
Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ProxyHeader: Specifies the type of proxy header to append before @@ -13383,9 +13753,12 @@ type HTTP2HealthCheck struct { // default value is /. RequestPath string `json:"requestPath,omitempty"` - // Response: The string to match anywhere in the first 1024 bytes of the - // response body. If left empty (the default value), the status code - // determines health. The response data can only be ASCII. + // Response: Creates a content-based HTTP/2 health check. In addition to + // the required HTTP 200 (OK) status code, you can configure the health + // check to pass only when the backend sends this specific ASCII + // response string within the first 1024 bytes of the HTTP response + // body. For details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http Response string `json:"response,omitempty"` // ForceSendFields is a list of field names (e.g. "Host") to @@ -13413,36 +13786,52 @@ func (s *HTTP2HealthCheck) MarshalJSON() ([]byte, error) { type HTTPHealthCheck struct { // Host: The value of the host header in the HTTP health check request. - // If left empty (default value), the IP on behalf of which this health - // check is performed will be used. + // If left empty (default value), the host header is set to the + // destination IP address to which health check packets are sent. The + // destination IP address depends on the type of load balancer. For + // details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest Host string `json:"host,omitempty"` - // Port: The TCP port number for the health check request. The default - // value is 80. Valid values are 1 through 65535. + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 80. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, HTTP health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. 
Also supported in + // legacy HTTP health checks for target pools. The health check supports + // all backends supported by the backend service provided the backend + // can be health checked. For example, GCE_VM_IP network endpoint + // groups, GCE_VM_IP_PORT network endpoint groups, and instance group + // backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides + // an indirect method of specifying the health check port by referring + // to the backend service. Only supported by backend services for proxy + // load balancers. Not supported by target pools. Not supported by + // backend services for pass-through load balancers. Supports all + // backends that can be health checked; for example, GCE_VM_IP_PORT + // network endpoint groups and instance group backends. For + // GCE_VM_IP_PORT network endpoint group backends, the health check uses + // the port number specified for each endpoint in the network endpoint + // group. For instance group backends, the health check uses the port + // number determined by looking up the backend service's named port in + // the instance group's list of named ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ProxyHeader: Specifies the type of proxy header to append before @@ -13458,9 +13847,12 @@ type HTTPHealthCheck struct { // default value is /. RequestPath string `json:"requestPath,omitempty"` - // Response: The string to match anywhere in the first 1024 bytes of the - // response body. If left empty (the default value), the status code - // determines health. The response data can only be ASCII. + // Response: Creates a content-based HTTP health check. In addition to + // the required HTTP 200 (OK) status code, you can configure the health + // check to pass only when the backend sends this specific ASCII + // response string within the first 1024 bytes of the HTTP response + // body. For details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http Response string `json:"response,omitempty"` // ForceSendFields is a list of field names (e.g. "Host") to @@ -13488,36 +13880,52 @@ func (s *HTTPHealthCheck) MarshalJSON() ([]byte, error) { type HTTPSHealthCheck struct { // Host: The value of the host header in the HTTPS health check request. - // If left empty (default value), the IP on behalf of which this health - // check is performed will be used. + // If left empty (default value), the host header is set to the + // destination IP address to which health check packets are sent. 
The + // destination IP address depends on the type of load balancer. For + // details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest Host string `json:"host,omitempty"` - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 443. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, HTTPS health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. 
+ // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ProxyHeader: Specifies the type of proxy header to append before @@ -13533,9 +13941,12 @@ type HTTPSHealthCheck struct { // default value is /. RequestPath string `json:"requestPath,omitempty"` - // Response: The string to match anywhere in the first 1024 bytes of the - // response body. If left empty (the default value), the status code - // determines health. The response data can only be ASCII. + // Response: Creates a content-based HTTPS health check. In addition to + // the required HTTP 200 (OK) status code, you can configure the health + // check to pass only when the backend sends this specific ASCII + // response string within the first 1024 bytes of the HTTP response + // body. For details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http Response string `json:"response,omitempty"` // ForceSendFields is a list of field names (e.g. "Host") to @@ -13762,6 +14173,9 @@ type HealthCheckListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -13952,14 +14366,20 @@ type HealthCheckService struct { Fingerprint string `json:"fingerprint,omitempty"` // HealthChecks: A list of URLs to the HealthCheck resources. Must have - // at least one HealthCheck, and not more than 10. HealthCheck resources - // must have portSpecification=USE_SERVING_PORT or + // at least one HealthCheck, and not more than 10 for regional + // HealthCheckService, and not more than 1 for global + // HealthCheckService. HealthCheck resources must have + // portSpecification=USE_SERVING_PORT or // portSpecification=USE_FIXED_PORT. For regional HealthCheckService, // the HealthCheck must be regional and in the same region. For global // HealthCheckService, HealthCheck must be global. Mix of regional and // global HealthChecks is not supported. Multiple regional HealthChecks // must belong to the same region. Regional HealthChecks must belong to - // the same region as zones of NEGs. + // the same region as zones of NetworkEndpointGroups. For global + // HealthCheckService using global INTERNET_IP_PORT + // NetworkEndpointGroups, the global HealthChecks must specify + // sourceRegions, and HealthChecks that specify sourceRegions can only + // be used with global INTERNET_IP_PORT NetworkEndpointGroups. HealthChecks []string `json:"healthChecks,omitempty"` // HealthStatusAggregationPolicy: Optional. Policy for how the results @@ -13969,6 +14389,7 @@ type HealthCheckService struct { // service. - AND. If any health check of an endpoint reports UNHEALTHY, // then UNHEALTHY is the HealthState of the endpoint. If all health // checks report HEALTHY, the HealthState of the endpoint is HEALTHY. . 
+ // This is only allowed with regional HealthCheckService. // // Possible values: // "AND" - If any backend's health check reports UNHEALTHY, then @@ -13999,7 +14420,8 @@ type HealthCheckService struct { // NetworkEndpointGroups: A list of URLs to the NetworkEndpointGroup // resources. Must not have more than 100. For regional // HealthCheckService, NEGs must be in zones in the region of the - // HealthCheckService. + // HealthCheckService. For global HealthCheckServices, the + // NetworkEndpointGroups must be global INTERNET_IP_PORT. NetworkEndpointGroups []string `json:"networkEndpointGroups,omitempty"` // NotificationEndpoints: A list of URLs to the NotificationEndpoint @@ -14157,6 +14579,9 @@ type HealthCheckServicesListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -14345,6 +14770,9 @@ type HealthChecksAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -14509,6 +14937,9 @@ type HealthChecksScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -14745,6 +15176,70 @@ func (s *HealthStatusForNetworkEndpoint) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Help: Provides links to documentation or for performing an out of +// band action. For example, if a quota check failed with an error +// indicating the calling project hasn't enabled the accessed service, +// this can contain a URL pointing directly to the right place in the +// developer console to flip the bit. +type Help struct { + // Links: URL(s) pointing to additional information on handling the + // current error. + Links []*HelpLink `json:"links,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Links") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Links") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Help) MarshalJSON() ([]byte, error) { + type NoMethod Help + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HelpLink: Describes a URL link. +type HelpLink struct { + // Description: Describes what the link offers. + Description string `json:"description,omitempty"` + + // Url: The URL of the link. + Url string `json:"url,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HelpLink) MarshalJSON() ([]byte, error) { + type NoMethod HelpLink + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HostRule: UrlMaps A host-matching rule for a URL. If matched, will // use the named PathMatcher to select the BackendService. type HostRule struct { @@ -15028,8 +15523,8 @@ type HttpHeaderMatch struct { // in the HTTP request, use a headerMatch with headerName set to PORT // and a regular expression that satisfies the RFC2616 Host header's // port specifier. Only one of exactMatch, prefixMatch, suffixMatch, - // regexMatch, presentMatch or rangeMatch must be set. regexMatch only - // applies to load balancers that have loadBalancingScheme set to + // regexMatch, presentMatch or rangeMatch must be set. Regular + // expressions can only be used when the loadBalancingScheme is set to // INTERNAL_SELF_MANAGED. RegexMatch string `json:"regexMatch,omitempty"` @@ -15270,6 +15765,9 @@ type HttpHealthCheckListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -15401,8 +15899,8 @@ type HttpQueryParameterMatch struct { // RegexMatch: The queryParameterMatch matches if the value of the // parameter matches the regular expression specified by regexMatch. For // more information about regular expression syntax, see Syntax. 
Only - // one of presentMatch, exactMatch, or regexMatch must be set. - // regexMatch only applies when the loadBalancingScheme is set to + // one of presentMatch, exactMatch, or regexMatch must be set. Regular + // expressions can only be used when the loadBalancingScheme is set to // INTERNAL_SELF_MANAGED. RegexMatch string `json:"regexMatch,omitempty"` @@ -15717,9 +16215,9 @@ type HttpRouteRule struct { // backend. If routeAction specifies any weightedBackendServices, // service must not be set. Conversely if service is set, routeAction // cannot contain any weightedBackendServices. Only one of urlRedirect, - // service or routeAction.weightedBackendService must be set. UrlMaps - // for external HTTP(S) load balancers support only the urlRewrite - // action within a route rule's routeAction. + // service or routeAction.weightedBackendService must be set. URL maps + // for Classic external HTTP(S) load balancers only support the + // urlRewrite action within a route rule's routeAction. RouteAction *HttpRouteAction `json:"routeAction,omitempty"` // Service: The full or partial URL of the backend service resource to @@ -15820,8 +16318,8 @@ type HttpRouteRuleMatch struct { // after removing any query parameters and anchor supplied with the // original URL. For more information about regular expression syntax, // see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must - // be specified. regexMatch only applies to load balancers that have - // loadBalancingScheme set to INTERNAL_SELF_MANAGED. + // be specified. Regular expressions can only be used when the + // loadBalancingScheme is set to INTERNAL_SELF_MANAGED. RegexMatch string `json:"regexMatch,omitempty"` // ForceSendFields is a list of field names (e.g. "FullPathMatch") to @@ -16018,6 +16516,9 @@ type HttpsHealthCheckListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -16130,6 +16631,16 @@ func (s *HttpsHealthCheckListWarningData) MarshalJSON() ([]byte, error) { // Image: Represents an Image resource. You can use images to create // boot disks for your VM instances. For more information, read Images. type Image struct { + // Architecture: The architecture of the image. Valid values are ARM64 + // or X86_64. + // + // Possible values: + // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture + // is not set. + // "ARM64" - Machines with architecture ARM64 + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + // ArchiveSizeBytes: Size of the image tar.gz archive stored in Google // Cloud Storage (in bytes). ArchiveSizeBytes int64 `json:"archiveSizeBytes,omitempty,string"` @@ -16309,7 +16820,7 @@ type Image struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "ArchiveSizeBytes") to + // ForceSendFields is a list of field names (e.g. "Architecture") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -16317,13 +16828,12 @@ type Image struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ArchiveSizeBytes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Architecture") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -16489,6 +16999,9 @@ type ImageListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -16698,6 +17211,18 @@ type Instance struct { // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` + // KeyRevocationActionType: KeyRevocationActionType of the instance. + // Supported options are "STOP" and "NONE". The default value is "NONE" + // if it is not specified. + // + // Possible values: + // "KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED" - Default value. This + // value is unused. + // "NONE" - Indicates user chose no operation. + // "STOP" - Indicates user chose to opt for VM shutdown on key + // revocation. + KeyRevocationActionType string `json:"keyRevocationActionType,omitempty"` + // Kind: [Output Only] Type of the resource. Always compute#instance for // instances. Kind string `json:"kind,omitempty"` @@ -16799,6 +17324,11 @@ type Instance struct { // ResourcePolicies: Resource policies applied to this instance. ResourcePolicies []string `json:"resourcePolicies,omitempty"` + // ResourceStatus: [Output Only] Specifies values set for instance + // attributes as compared to the values requested by user in the + // corresponding input only field. + ResourceStatus *ResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -16981,6 +17511,9 @@ type InstanceAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
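Two of the new resource fields above are plain enum strings: Image.Architecture and Instance.KeyRevocationActionType. A minimal sketch with placeholder resource names, assuming the vendored compute/v1 package:

```go
package main

import compute "google.golang.org/api/compute/v1"

func main() {
	// Image.Architecture: ARM64 or X86_64.
	img := &compute.Image{
		Name:         "my-arm-image",                      // placeholder
		SourceDisk:   "zones/us-central1-a/disks/my-disk", // placeholder
		Architecture: "ARM64",
	}

	// Instance.KeyRevocationActionType: STOP shuts the VM down when the
	// key it depends on is revoked; the default is NONE.
	inst := &compute.Instance{
		Name:                    "my-instance", // placeholder
		KeyRevocationActionType: "STOP",
	}
	_, _ = img, inst
}
```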
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -17090,6 +17623,77 @@ func (s *InstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstanceConsumptionData struct { + // ConsumptionInfo: Resources consumed by the instance. + ConsumptionInfo *InstanceConsumptionInfo `json:"consumptionInfo,omitempty"` + + // Instance: Server-defined URL for the instance. + Instance string `json:"instance,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConsumptionInfo") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumptionInfo") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstanceConsumptionData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceConsumptionData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceConsumptionInfo struct { + // GuestCpus: The number of virtual CPUs that are available to the + // instance. + GuestCpus int64 `json:"guestCpus,omitempty"` + + // LocalSsdGb: The amount of local SSD storage available to the + // instance, defined in GiB. + LocalSsdGb int64 `json:"localSsdGb,omitempty"` + + // MemoryMb: The amount of physical memory available to the instance, + // defined in MiB. + MemoryMb int64 `json:"memoryMb,omitempty"` + + // MinNodeCpus: The minimal guaranteed number of virtual CPUs that are + // reserved. + MinNodeCpus int64 `json:"minNodeCpus,omitempty"` + + // ForceSendFields is a list of field names (e.g. "GuestCpus") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "GuestCpus") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstanceConsumptionInfo) MarshalJSON() ([]byte, error) { + type NoMethod InstanceConsumptionInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceGroup: Represents an Instance Group resource. Instance Groups // can be used to configure a target for load balancing. Instance groups // can either be managed or unmanaged. To create managed instance @@ -17270,6 +17874,9 @@ type InstanceGroupAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -17457,6 +18064,9 @@ type InstanceGroupListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -17626,6 +18236,19 @@ type InstanceGroupManager struct { // compute#instanceGroupManager for managed instance groups. Kind string `json:"kind,omitempty"` + // ListManagedInstancesResults: Pagination behavior of the + // listManagedInstances API method for this managed instance group. + // + // Possible values: + // "PAGELESS" - (Default) Pagination is disabled for the group's + // listManagedInstances API method. maxResults and pageToken query + // parameters are ignored and all instances are returned in a single + // response. + // "PAGINATED" - Pagination is enabled for the group's + // listManagedInstances API method. maxResults and pageToken query + // parameters are respected. + ListManagedInstancesResults string `json:"listManagedInstancesResults,omitempty"` + // Name: The name of the managed instance group. The name must be 1-63 // characters long, and comply with RFC1035. Name string `json:"name,omitempty"` @@ -17884,6 +18507,9 @@ type InstanceGroupManagerAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -18109,6 +18735,9 @@ type InstanceGroupManagerListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
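InstanceGroupManager gains ListManagedInstancesResults, which controls whether the listManagedInstances method paginates. A sketch that opts a placeholder group into PAGINATED mode:

```go
package main

import compute "google.golang.org/api/compute/v1"

func main() {
	igm := &compute.InstanceGroupManager{
		Name:             "my-mig",                               // placeholder
		InstanceTemplate: "global/instanceTemplates/my-template", // placeholder
		TargetSize:       3,
		// PAGINATED makes listManagedInstances honour maxResults and
		// pageToken; the default PAGELESS mode returns everything at once.
		ListManagedInstancesResults: "PAGINATED",
	}
	_ = igm
}
```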
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -18884,6 +19513,9 @@ type InstanceGroupManagersListPerInstanceConfigsRespWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -19112,6 +19744,9 @@ type InstanceGroupManagersScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -19429,6 +20064,9 @@ type InstanceGroupsListInstancesWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -19659,6 +20297,9 @@ type InstanceGroupsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -19882,6 +20523,9 @@ type InstanceListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -20069,6 +20713,9 @@ type InstanceListReferrersWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -20422,6 +21069,18 @@ type InstanceProperties struct { // to use for instances created from these properties. GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` + // KeyRevocationActionType: KeyRevocationActionType of the instance. + // Supported options are "STOP" and "NONE". The default value is "NONE" + // if it is not specified. + // + // Possible values: + // "KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED" - Default value. This + // value is unused. + // "NONE" - Indicates user chose no operation. + // "STOP" - Indicates user chose to opt for VM shutdown on key + // revocation. + KeyRevocationActionType string `json:"keyRevocationActionType,omitempty"` + // Labels: Labels to apply to instances that are created from these // properties. Labels map[string]string `json:"labels,omitempty"` @@ -20712,6 +21371,9 @@ type InstanceTemplateListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -21065,6 +21727,9 @@ type InstancesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -21485,8 +22150,9 @@ type Interconnect struct { // NocContactEmail: Email address to contact the customer NOC for // operations and maintenance notifications regarding this Interconnect. // If specified, this will be used for notifications in addition to all - // other forms described, such as Stackdriver logs alerting and Cloud - // Notifications. + // other forms described, such as Cloud Monitoring logs alerting and + // Cloud Notifications. This field is required for users who sign up for + // Cloud Interconnect using workforce identity federation. NocContactEmail string `json:"nocContactEmail,omitempty"` // OperationalStatus: [Output Only] The current status of this @@ -21684,16 +22350,16 @@ type InterconnectAttachment struct { // - The VLAN attachment carries only encrypted traffic that is // encrypted by an IPsec device, such as an HA VPN gateway or // third-party IPsec VPN. VMs cannot directly send traffic to, or - // receive traffic from, such a VLAN attachment. To use *IPsec-encrypted + // receive traffic from, such a VLAN attachment. To use *HA VPN over // Cloud Interconnect*, the VLAN attachment must be created with this - // option. Not currently available publicly. + // option. 
// // Possible values: // "IPSEC" - The interconnect attachment will carry only encrypted // traffic that is encrypted by an IPsec device such as HA VPN gateway; // VMs cannot directly send traffic to or receive traffic from such an - // interconnect attachment. To use IPsec-encrypted Cloud Interconnect, - // the interconnect attachment must be created with this option. + // interconnect attachment. To use HA VPN over Cloud Interconnect, the + // interconnect attachment must be created with this option. // "NONE" - This is the default value, which means the Interconnect // Attachment will carry unencrypted traffic. VMs will be able to send // traffic to or receive traffic from such interconnect attachment. @@ -21979,6 +22645,9 @@ type InterconnectAttachmentAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -22168,6 +22837,9 @@ type InterconnectAttachmentListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -22412,6 +23084,9 @@ type InterconnectAttachmentsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -22570,6 +23245,27 @@ type InterconnectDiagnostics struct { // Interconnect is not bundled. ArpCaches []*InterconnectDiagnosticsARPEntry `json:"arpCaches,omitempty"` + // BundleAggregationType: The aggregation type of the bundle interface. + // + // Possible values: + // "BUNDLE_AGGREGATION_TYPE_LACP" - LACP is enabled. + // "BUNDLE_AGGREGATION_TYPE_STATIC" - LACP is disabled. + BundleAggregationType string `json:"bundleAggregationType,omitempty"` + + // BundleOperationalStatus: The operational status of the bundle + // interface. + // + // Possible values: + // "BUNDLE_OPERATIONAL_STATUS_DOWN" - If bundleAggregationType is + // LACP: LACP is not established and/or all links in the bundle have + // DOWN operational status. If bundleAggregationType is STATIC: one or + // more links in the bundle has DOWN operational status. + // "BUNDLE_OPERATIONAL_STATUS_UP" - If bundleAggregationType is LACP: + // LACP is established and at least one link in the bundle has UP + // operational status. If bundleAggregationType is STATIC: all links in + // the bundle (typically just one) have UP operational status. 
+ BundleOperationalStatus string `json:"bundleOperationalStatus,omitempty"` + // Links: A list of InterconnectDiagnostics.LinkStatus objects, // describing the status for each link on the Interconnect. Links []*InterconnectDiagnosticsLinkStatus `json:"links,omitempty"` @@ -22759,6 +23455,15 @@ type InterconnectDiagnosticsLinkStatus struct { LacpStatus *InterconnectDiagnosticsLinkLACPStatus `json:"lacpStatus,omitempty"` + // OperationalStatus: The operational status of the link. + // + // Possible values: + // "LINK_OPERATIONAL_STATUS_DOWN" - The interface is unable to + // communicate with the remote end. + // "LINK_OPERATIONAL_STATUS_UP" - The interface has low level + // communication with the remote end. + OperationalStatus string `json:"operationalStatus,omitempty"` + // ReceivingOpticalPower: An InterconnectDiagnostics.LinkOpticalPower // object, describing the current value and status of the received light // level. @@ -22870,6 +23575,9 @@ type InterconnectListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -23178,6 +23886,9 @@ type InterconnectLocationListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -23789,6 +24500,9 @@ type LicensesListResponseWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -23934,6 +24648,40 @@ func (s *LocalDisk) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// LocalizedMessage: Provides a localized error message that is safe to +// return to the user which can be attached to an RPC error. +type LocalizedMessage struct { + // Locale: The locale used following the specification defined at + // https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", + // "fr-CH", "es-MX" + Locale string `json:"locale,omitempty"` + + // Message: The localized error message in the above locale. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Locale") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Locale") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LocalizedMessage) MarshalJSON() ([]byte, error) { + type NoMethod LocalizedMessage + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // LocationPolicy: Configuration for location policy among multiple // possible locations (e.g. preferences for zone selection among zones // in a single region). @@ -23985,7 +24733,12 @@ func (s *LocationPolicy) MarshalJSON() ([]byte, error) { } type LocationPolicyLocation struct { - // Preference: Preference for a given location. + // Constraints: Constraints that the caller requires on the result + // distribution in this zone. + Constraints *LocationPolicyLocationConstraints `json:"constraints,omitempty"` + + // Preference: Preference for a given location. Set to either ALLOW or + // DENY. // // Possible values: // "ALLOW" - Location is allowed for use. @@ -23993,7 +24746,37 @@ type LocationPolicyLocation struct { // "PREFERENCE_UNSPECIFIED" - Default value, unused. Preference string `json:"preference,omitempty"` - // ForceSendFields is a list of field names (e.g. "Preference") to + // ForceSendFields is a list of field names (e.g. "Constraints") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Constraints") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LocationPolicyLocation) MarshalJSON() ([]byte, error) { + type NoMethod LocationPolicyLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LocationPolicyLocationConstraints: Per-zone constraints on location +// policy for this zone. +type LocationPolicyLocationConstraints struct { + // MaxCount: Maximum number of items that are allowed to be placed in + // this zone. The value must be non-negative. + MaxCount int64 `json:"maxCount,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxCount") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -24001,7 +24784,7 @@ type LocationPolicyLocation struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Preference") to include in + // NullFields is a list of field names (e.g. "MaxCount") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -24010,8 +24793,8 @@ type LocationPolicyLocation struct { NullFields []string `json:"-"` } -func (s *LocationPolicyLocation) MarshalJSON() ([]byte, error) { - type NoMethod LocationPolicyLocation +func (s *LocationPolicyLocationConstraints) MarshalJSON() ([]byte, error) { + type NoMethod LocationPolicyLocationConstraints raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24398,6 +25181,9 @@ type MachineImageListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -24743,6 +25529,9 @@ type MachineTypeAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -24929,6 +25718,9 @@ type MachineTypeListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -25094,6 +25886,9 @@ type MachineTypesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -25445,6 +26240,13 @@ type ManagedInstanceLastAttemptErrorsErrors struct { // Code: [Output Only] The error type identifier for this error. Code string `json:"code,omitempty"` + // ErrorDetails: [Output Only] An optional list of messages that contain + // the error details. 
There is a set of defined message types to use for + // providing details.The syntax depends on the error code. For example, + // QuotaExceededInfo will have details when the error code is + // QUOTA_EXCEEDED. + ErrorDetails []*ManagedInstanceLastAttemptErrorsErrorsErrorDetails `json:"errorDetails,omitempty"` + // Location: [Output Only] Indicates the field in the request that // caused the error. This property is optional. Location string `json:"location,omitempty"` @@ -25475,6 +26277,38 @@ func (s *ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ManagedInstanceLastAttemptErrorsErrorsErrorDetails struct { + ErrorInfo *ErrorInfo `json:"errorInfo,omitempty"` + + Help *Help `json:"help,omitempty"` + + LocalizedMessage *LocalizedMessage `json:"localizedMessage,omitempty"` + + QuotaInfo *QuotaExceededInfo `json:"quotaInfo,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ErrorInfo") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ErrorInfo") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedInstanceLastAttemptErrorsErrorsErrorDetails) MarshalJSON() ([]byte, error) { + type NoMethod ManagedInstanceLastAttemptErrorsErrorsErrorDetails + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ManagedInstanceVersion struct { // InstanceTemplate: [Output Only] The intended template of the // instance. This field is empty when current_action is one of { @@ -25748,6 +26582,10 @@ type Network struct { // prefix fd20::/20. . EnableUlaInternalIpv6 bool `json:"enableUlaInternalIpv6,omitempty"` + // FirewallPolicy: [Output Only] URL of the firewall policy the network + // is associated with. + FirewallPolicy string `json:"firewallPolicy,omitempty"` + // GatewayIPv4: [Output Only] The gateway address for default routing // out of the network, selected by GCP. GatewayIPv4 string `json:"gatewayIPv4,omitempty"` @@ -25770,8 +26608,9 @@ type Network struct { Kind string `json:"kind,omitempty"` // Mtu: Maximum Transmission Unit in bytes. The minimum value for this - // field is 1460 and the maximum value is 1500 bytes. If unspecified, - // defaults to 1460. + // field is 1300 and the maximum value is 8896. The suggested value is + // 1500, which is the default MTU used on the Internet, or 8896 if you + // want to use Jumbo frames. If unspecified, the value defaults to 1460. Mtu int64 `json:"mtu,omitempty"` // Name: Name of the resource. 
Provided by the client when the resource @@ -25839,9 +26678,19 @@ func (s *Network) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkEdgeSecurityService: Represents a Google Cloud Armor network -// edge security service resource. -type NetworkEdgeSecurityService struct { +// NetworkAttachment: NetworkAttachments A network attachment resource +// ... +type NetworkAttachment struct { + // ConnectionEndpoints: [Output Only] An array of connections for all + // the producers connected to this network attachment. + ConnectionEndpoints []*NetworkAttachmentConnectedEndpoint `json:"connectionEndpoints,omitempty"` + + // Possible values: + // "ACCEPT_AUTOMATIC" + // "ACCEPT_MANUAL" + // "INVALID" + ConnectionPreference string `json:"connectionPreference,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -25850,21 +26699,17 @@ type NetworkEdgeSecurityService struct { // property when you create the resource. Description string `json:"description,omitempty"` - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a NetworkEdgeSecurityService. An - // up-to-date fingerprint must be provided in order to update the - // NetworkEdgeSecurityService, otherwise the request will fail with - // error 412 conditionNotMet. To see the latest fingerprint, make a - // get() request to retrieve a NetworkEdgeSecurityService. + // Fingerprint: [Output Only] Fingerprint of this resource. A hash of + // the contents stored in this object. This field is used in optimistic + // locking. An up-to-date fingerprint must be provided in order to + // patch. Fingerprint string `json:"fingerprint,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. + // Id: [Output Only] The unique identifier for the resource type. The + // server generates this identifier. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output only] Type of the resource. Always - // compute#networkEdgeSecurityService for NetworkEdgeSecurityServices + // Kind: [Output Only] Type of the resource. Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -25876,27 +26721,43 @@ type NetworkEdgeSecurityService struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Region: [Output Only] URL of the region where the resource resides. - // You must specify this field as part of the HTTP request URL. It is - // not settable as a field in the request body. - Region string `json:"region,omitempty"` + // Network: [Output Only] The URL of the network which the Network + // Attachment belongs to. + Network string `json:"network,omitempty"` - // SecurityPolicy: The resource URL for the network edge security - // service associated with this network edge security service. - SecurityPolicy string `json:"securityPolicy,omitempty"` + // ProducerAcceptLists: Projects that are allowed to connect to this + // network attachment. The project can be specified using its id or + // number. + ProducerAcceptLists []string `json:"producerAcceptLists,omitempty"` + + // ProducerRejectLists: Projects that are not allowed to connect to this + // network attachment. The project can be specified using its id or + // number. 
+ ProducerRejectLists []string `json:"producerRejectLists,omitempty"` + + // Region: [Output Only] URL of the region where the network attachment + // resides. This field applies only to the region resource. You must + // specify this field as part of the HTTP request URL. It is not + // settable as a field in the request body. + Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SelfLinkWithId: [Output Only] Server-defined URL for this resource - // with the resource id. + // SelfLinkWithId: [Output Only] Server-defined URL for this resource's + // resource id. SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // Subnetworks: An array of URLs where each entry is the URL of a subnet + // provided by the service consumer to use for endpoints in the + // producers that connect to this network attachment. + Subnetworks []string `json:"subnetworks,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // ForceSendFields is a list of field names (e.g. "ConnectionEndpoints") // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -25904,7 +26765,7 @@ type NetworkEdgeSecurityService struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "ConnectionEndpoints") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -25914,25 +26775,22 @@ type NetworkEdgeSecurityService struct { NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityService) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEdgeSecurityService +func (s *NetworkAttachment) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachment raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEdgeSecurityServiceAggregatedList struct { - Etag string `json:"etag,omitempty"` - +// NetworkAttachmentAggregatedList: Contains a list of +// NetworkAttachmentsScopedList. +type NetworkAttachmentAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NetworkEdgeSecurityServicesScopedList resources. - Items map[string]NetworkEdgeSecurityServicesScopedList `json:"items,omitempty"` + // Items: A list of NetworkAttachmentsScopedList resources. + Items map[string]NetworkAttachmentsScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#networkEdgeSecurityServiceAggregatedList for lists of Network - // Edge Security Services. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -25946,17 +26804,14 @@ type NetworkEdgeSecurityServiceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Unreachables: [Output Only] Unreachable resources. 
- Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *NetworkEdgeSecurityServiceAggregatedListWarning `json:"warning,omitempty"` + Warning *NetworkAttachmentAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Etag") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -25964,7 +26819,7 @@ type NetworkEdgeSecurityServiceAggregatedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Etag") to include in API + // NullFields is a list of field names (e.g. "Id") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -25973,15 +26828,15 @@ type NetworkEdgeSecurityServiceAggregatedList struct { NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityServiceAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEdgeSecurityServiceAggregatedList +func (s *NetworkAttachmentAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkEdgeSecurityServiceAggregatedListWarning: [Output Only] -// Informational warning message. -type NetworkEdgeSecurityServiceAggregatedListWarning struct { +// NetworkAttachmentAggregatedListWarning: [Output Only] Informational +// warning message. +type NetworkAttachmentAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -26003,6 +26858,9 @@ type NetworkEdgeSecurityServiceAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -26046,7 +26904,7 @@ type NetworkEdgeSecurityServiceAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NetworkEdgeSecurityServiceAggregatedListWarningData `json:"data,omitempty"` + Data []*NetworkAttachmentAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
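Aside (not part of the vendored diff): every struct added above repeats the same ForceSendFields/NullFields contract in its comments — zero-valued fields are normally dropped, ForceSendFields forces them onto the wire, and NullFields sends explicit JSON nulls. The sketch below mimics that contract with only the standard library so it can run standalone; the names patchExample, sendPatch, and contains are hypothetical, and the vendored code itself delegates this logic to its gensupport.MarshalJSON helper rather than hand-rolling it.

package main

import (
	"encoding/json"
	"fmt"
)

// patchExample stands in for any generated struct in this diff, such as
// InstanceConsumptionInfo or NetworkAttachment.
type patchExample struct {
	GuestCpus int64 `json:"guestCpus,omitempty"`
	MemoryMb  int64 `json:"memoryMb,omitempty"`

	// ForceSendFields and NullFields are hidden from encoding/json ("-") and
	// consulted only by the marshalling helper, as in the generated code.
	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}

// sendPatch mimics the documented behaviour: fields listed in ForceSendFields
// are emitted even when zero, and fields listed in NullFields are emitted as
// explicit JSON null.
func sendPatch(p patchExample) ([]byte, error) {
	m := map[string]interface{}{}
	if p.GuestCpus != 0 || contains(p.ForceSendFields, "GuestCpus") {
		m["guestCpus"] = p.GuestCpus
	}
	if p.MemoryMb != 0 || contains(p.ForceSendFields, "MemoryMb") {
		m["memoryMb"] = p.MemoryMb
	}
	for _, f := range p.NullFields {
		switch f {
		case "GuestCpus":
			m["guestCpus"] = nil
		case "MemoryMb":
			m["memoryMb"] = nil
		}
	}
	return json.Marshal(m)
}

func contains(list []string, s string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	// Without ForceSendFields a zero GuestCpus would be omitted from the patch
	// body; listing it forces "guestCpus":0 to be sent.
	b, _ := sendPatch(patchExample{ForceSendFields: []string{"GuestCpus"}})
	fmt.Println(string(b)) // {"guestCpus":0}
}

In the real client the same effect is obtained by setting ForceSendFields or NullFields on the generated struct before calling a Patch method; the struct's MarshalJSON (shown throughout this diff) applies the lists for you.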
@@ -26069,13 +26927,13 @@ type NetworkEdgeSecurityServiceAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityServiceAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEdgeSecurityServiceAggregatedListWarning +func (s *NetworkAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEdgeSecurityServiceAggregatedListWarningData struct { +type NetworkAttachmentAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -26106,49 +26964,123 @@ type NetworkEdgeSecurityServiceAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEdgeSecurityServiceAggregatedListWarningData +func (s *NetworkAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEdgeSecurityServicesScopedList struct { - // NetworkEdgeSecurityServices: A list of NetworkEdgeSecurityServices - // contained in this scope. - NetworkEdgeSecurityServices []*NetworkEdgeSecurityService `json:"networkEdgeSecurityServices,omitempty"` +// NetworkAttachmentConnectedEndpoint: [Output Only] A connection +// connected to this network attachment. +type NetworkAttachmentConnectedEndpoint struct { + // IpAddress: The IP address assigned to the producer instance network + // interface. This value will be a range in case of Serverless. + IpAddress string `json:"ipAddress,omitempty"` - // Warning: Informational warning which replaces the list of security - // policies when the list is empty. - Warning *NetworkEdgeSecurityServicesScopedListWarning `json:"warning,omitempty"` + // ProjectIdOrNum: The project id or number of the interface to which + // the IP was assigned. + ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "NetworkEdgeSecurityServices") to unconditionally include in API - // requests. By default, fields with empty or default values are omitted - // from API requests. However, any non-pointer, non-interface field - // appearing in ForceSendFields will be sent to the server regardless of - // whether the field is empty or not. This may be used to include empty - // fields in Patch requests. + // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork + SecondaryIpCidrRanges []string `json:"secondaryIpCidrRanges,omitempty"` + + // Status: The status of a connected endpoint to this network + // attachment. + // + // Possible values: + // "ACCEPTED" - The consumer allows traffic from the producer to reach + // its VPC. + // "CLOSED" - The consumer network attachment no longer exists. + // "NEEDS_ATTENTION" - The consumer needs to take further action + // before traffic can be served. + // "PENDING" - The consumer neither allows nor prohibits traffic from + // the producer to reach its VPC. + // "REJECTED" - The consumer prohibits traffic from the producer to + // reach its VPC. 
+ // "STATUS_UNSPECIFIED" + Status string `json:"status,omitempty"` + + // Subnetwork: The subnetwork used to assign the IP to the producer + // instance network interface. + Subnetwork string `json:"subnetwork,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpAddress") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. - // "NetworkEdgeSecurityServices") to include in API requests with the - // JSON null value. By default, fields with empty values are omitted - // from API requests. However, any field with an empty value appearing - // in NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. + // NullFields is a list of field names (e.g. "IpAddress") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityServicesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEdgeSecurityServicesScopedList +func (s *NetworkAttachmentConnectedEndpoint) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentConnectedEndpoint raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkEdgeSecurityServicesScopedListWarning: Informational warning -// which replaces the list of security policies when the list is empty. -type NetworkEdgeSecurityServicesScopedListWarning struct { +type NetworkAttachmentList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of NetworkAttachment resources. + Items []*NetworkAttachment `json:"items,omitempty"` + + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *NetworkAttachmentListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkAttachmentListWarning: [Output Only] Informational warning +// message. +type NetworkAttachmentListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -26170,6 +27102,9 @@ type NetworkEdgeSecurityServicesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -26213,7 +27148,7 @@ type NetworkEdgeSecurityServicesScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NetworkEdgeSecurityServicesScopedListWarningData `json:"data,omitempty"` + Data []*NetworkAttachmentListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -26236,13 +27171,628 @@ type NetworkEdgeSecurityServicesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityServicesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEdgeSecurityServicesScopedListWarning +func (s *NetworkAttachmentListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEdgeSecurityServicesScopedListWarningData struct { +type NetworkAttachmentListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. 
+ Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkAttachmentsScopedList struct { + // NetworkAttachments: A list of NetworkAttachments contained in this + // scope. + NetworkAttachments []*NetworkAttachment `json:"networkAttachments,omitempty"` + + // Warning: Informational warning which replaces the list of network + // attachments when the list is empty. + Warning *NetworkAttachmentsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkAttachments") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NetworkAttachments") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentsScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkAttachmentsScopedListWarning: Informational warning which +// replaces the list of network attachments when the list is empty. +type NetworkAttachmentsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. 
+ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*NetworkAttachmentsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentsScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkAttachmentsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentsScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkEdgeSecurityService: Represents a Google Cloud Armor network +// edge security service resource. +type NetworkEdgeSecurityService struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a NetworkEdgeSecurityService. An + // up-to-date fingerprint must be provided in order to update the + // NetworkEdgeSecurityService, otherwise the request will fail with + // error 412 conditionNotMet. 
To see the latest fingerprint, make a + // get() request to retrieve a NetworkEdgeSecurityService. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output only] Type of the resource. Always + // compute#networkEdgeSecurityService for NetworkEdgeSecurityServices + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Region: [Output Only] URL of the region where the resource resides. + // You must specify this field as part of the HTTP request URL. It is + // not settable as a field in the request body. + Region string `json:"region,omitempty"` + + // SecurityPolicy: The resource URL for the network edge security + // service associated with this network edge security service. + SecurityPolicy string `json:"securityPolicy,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEdgeSecurityService) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityService + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkEdgeSecurityServiceAggregatedList struct { + Etag string `json:"etag,omitempty"` + + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of NetworkEdgeSecurityServicesScopedList resources. + Items map[string]NetworkEdgeSecurityServicesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#networkEdgeSecurityServiceAggregatedList for lists of Network + // Edge Security Services. 
+ Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *NetworkEdgeSecurityServiceAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEdgeSecurityServiceAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityServiceAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkEdgeSecurityServiceAggregatedListWarning: [Output Only] +// Informational warning message. +type NetworkEdgeSecurityServiceAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
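// --- Editorial sketch, not part of the generated file ---
// Walks one page of the aggregated list defined above. Items is keyed by
// scope (for this regional resource, presumably keys like
// "regions/us-central1"); scopes with no results carry a Warning in their
// scoped list instead of data, and a non-empty NextPageToken would be passed
// as the pageToken query parameter of the next list request.
func collectEdgeSecurityServices(page *compute.NetworkEdgeSecurityServiceAggregatedList) []*compute.NetworkEdgeSecurityService {
	var all []*compute.NetworkEdgeSecurityService
	for _, scoped := range page.Items {
		if scoped.Warning != nil && scoped.Warning.Code == "NO_RESULTS_ON_PAGE" {
			continue // nothing in this scope
		}
		all = append(all, scoped.NetworkEdgeSecurityServices...)
	}
	return all
}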
+ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*NetworkEdgeSecurityServiceAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *NetworkEdgeSecurityServiceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityServiceAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkEdgeSecurityServiceAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEdgeSecurityServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityServiceAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkEdgeSecurityServicesScopedList struct { + // NetworkEdgeSecurityServices: A list of NetworkEdgeSecurityServices + // contained in this scope. + NetworkEdgeSecurityServices []*NetworkEdgeSecurityService `json:"networkEdgeSecurityServices,omitempty"` + + // Warning: Informational warning which replaces the list of security + // policies when the list is empty. + Warning *NetworkEdgeSecurityServicesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "NetworkEdgeSecurityServices") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "NetworkEdgeSecurityServices") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. 
This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEdgeSecurityServicesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityServicesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkEdgeSecurityServicesScopedListWarning: Informational warning +// which replaces the list of security policies when the list is empty. +type NetworkEdgeSecurityServicesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. 
+ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*NetworkEdgeSecurityServicesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEdgeSecurityServicesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityServicesScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkEdgeSecurityServicesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -26409,6 +27959,8 @@ type NetworkEndpointGroup struct { // serverless infrastructure. NetworkEndpointType string `json:"networkEndpointType,omitempty"` + PscData *NetworkEndpointGroupPscData `json:"pscData,omitempty"` + // PscTargetService: The target service url used to set up private // service connection to a Google API or a PSC Producer Service // Attachment. An example value is: @@ -26542,6 +28094,9 @@ type NetworkEndpointGroupAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -26669,9 +28224,10 @@ type NetworkEndpointGroupAppEngine struct { // and backend services. For example, the request URLs // "foo1-dot-appname.appspot.com/v1" and // "foo1-dot-appname.appspot.com/v2" can be backed by the same - // Serverless NEG with URL mask "-dot-appname.appspot.com/". 
The URL - // mask will parse them to { service = "foo1", version = "v1" } and { - // service = "foo1", version = "v2" } respectively. + // Serverless NEG with URL mask + // "-dot-appname.appspot.com/". The URL mask will + // parse them to { service = "foo1", version = "v1" } and { service = + // "foo1", version = "v2" } respectively. UrlMask string `json:"urlMask,omitempty"` // Version: Optional serving version. The version name is case-sensitive @@ -26716,8 +28272,8 @@ type NetworkEndpointGroupCloudFunction struct { // create multiple Network Endpoint Groups and backend services. For // example, request URLs " mydomain.com/function1" and // "mydomain.com/function2" can be backed by the same Serverless NEG - // with URL mask "/". The URL mask will parse them to { function = - // "function1" } and { function = "function2" } respectively. + // with URL mask "/". The URL mask will parse them to { + // function = "function1" } and { function = "function2" } respectively. UrlMask string `json:"urlMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Function") to @@ -26870,6 +28426,9 @@ type NetworkEndpointGroupListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -26979,6 +28538,57 @@ func (s *NetworkEndpointGroupListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NetworkEndpointGroupPscData: All data that is specifically relevant +// to only network endpoint groups of type PRIVATE_SERVICE_CONNECT. +type NetworkEndpointGroupPscData struct { + // ConsumerPscAddress: [Output Only] Address allocated from given + // subnetwork for PSC. This IP address acts as a VIP for a PSC NEG, + // allowing it to act as an endpoint in L7 PSC-XLB. + ConsumerPscAddress string `json:"consumerPscAddress,omitempty"` + + // PscConnectionId: [Output Only] The PSC connection id of the PSC + // Network Endpoint Group Consumer. + PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` + + // PscConnectionStatus: [Output Only] The connection status of the PSC + // Forwarding Rule. + // + // Possible values: + // "ACCEPTED" - The connection has been accepted by the producer. + // "CLOSED" - The connection has been closed by the producer and will + // not serve traffic going forward. + // "NEEDS_ATTENTION" - The connection has been accepted by the + // producer, but the producer needs to take further action before the + // forwarding rule can serve traffic. + // "PENDING" - The connection is pending acceptance by the producer. + // "REJECTED" - The connection has been rejected by the producer. + // "STATUS_UNSPECIFIED" + PscConnectionStatus string `json:"pscConnectionStatus,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConsumerPscAddress") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. 
+ // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumerPscAddress") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEndpointGroupPscData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupPscData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type NetworkEndpointGroupsAttachEndpointsRequest struct { // NetworkEndpoints: The list of network endpoints to be attached. NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` @@ -27145,6 +28755,9 @@ type NetworkEndpointGroupsListNetworkEndpointsWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -27313,6 +28926,9 @@ type NetworkEndpointGroupsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -27662,6 +29278,9 @@ type NetworkListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -28205,6 +29824,9 @@ type NodeGroup struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // ShareSettings: Share-settings for the node group + ShareSettings *ShareSettings `json:"shareSettings,omitempty"` + // Size: [Output Only] The total number of nodes in the node group. Size int64 `json:"size,omitempty"` @@ -28327,6 +29949,9 @@ type NodeGroupAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
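// --- Editorial sketch, not part of the generated file ---
// Checks the Private Service Connect state that a PRIVATE_SERVICE_CONNECT NEG
// now reports through the PscData field added above. The status strings are
// the enum values documented on PscConnectionStatus.
func pscNegServing(neg *compute.NetworkEndpointGroup) bool {
	if neg.PscData == nil {
		return false // not a PSC NEG, or the field was not populated
	}
	switch neg.PscData.PscConnectionStatus {
	case "ACCEPTED":
		return true // producer accepted the connection; traffic can be served
	case "PENDING", "NEEDS_ATTENTION":
		return false // producer action still outstanding
	default:
		return false // CLOSED, REJECTED or STATUS_UNSPECIFIED
	}
}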
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -28555,6 +30180,9 @@ type NodeGroupListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -28706,6 +30334,9 @@ type NodeGroupNode struct { // Accelerators: Accelerators for this node. Accelerators []*AcceleratorConfig `json:"accelerators,omitempty"` + // ConsumedResources: Node resources that are reserved by all instances. + ConsumedResources *InstanceConsumptionInfo `json:"consumedResources,omitempty"` + // CpuOvercommitType: CPU overcommit. // // Possible values: @@ -28717,6 +30348,10 @@ type NodeGroupNode struct { // Disks: Local disk configurations. Disks []*LocalDisk `json:"disks,omitempty"` + // InstanceConsumptionData: Instance data that shows consumed resources + // on the node. + InstanceConsumptionData []*InstanceConsumptionData `json:"instanceConsumptionData,omitempty"` + // Instances: Instances scheduled on this node. Instances []string `json:"instances,omitempty"` @@ -28743,6 +30378,9 @@ type NodeGroupNode struct { // "REPAIRING" Status string `json:"status,omitempty"` + // TotalResources: Total amount of available resources on the node. + TotalResources *InstanceConsumptionInfo `json:"totalResources,omitempty"` + // ForceSendFields is a list of field names (e.g. "Accelerators") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -28900,6 +30538,9 @@ type NodeGroupsListNodesWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -29065,6 +30706,9 @@ type NodeGroupsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -29396,6 +31040,9 @@ type NodeTemplateAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -29582,6 +31229,9 @@ type NodeTemplateListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -29777,6 +31427,9 @@ type NodeTemplatesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -30045,6 +31698,9 @@ type NodeTypeAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -30231,6 +31887,9 @@ type NodeTypeListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -30396,6 +32055,9 @@ type NodeTypesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -30597,7 +32259,8 @@ type NotificationEndpointGrpcSettings struct { // ResendInterval: Optional. This field is used to configure how often // to send a full update of all non-healthy backends. If unspecified, // full updates are not sent. If specified, must be in the range between - // 600 seconds to 3600 seconds. Nanos are disallowed. + // 600 seconds to 3600 seconds. Nanos are disallowed. Can only be set + // for regional notification endpoints. 
ResendInterval *Duration `json:"resendInterval,omitempty"` // RetryDurationSec: How much time (in seconds) is spent attempting @@ -30705,6 +32368,9 @@ type NotificationEndpointListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -30994,6 +32660,13 @@ type OperationErrorErrors struct { // Code: [Output Only] The error type identifier for this error. Code string `json:"code,omitempty"` + // ErrorDetails: [Output Only] An optional list of messages that contain + // the error details. There is a set of defined message types to use for + // providing details.The syntax depends on the error code. For example, + // QuotaExceededInfo will have details when the error code is + // QUOTA_EXCEEDED. + ErrorDetails []*OperationErrorErrorsErrorDetails `json:"errorDetails,omitempty"` + // Location: [Output Only] Indicates the field in the request that // caused the error. This property is optional. Location string `json:"location,omitempty"` @@ -31024,6 +32697,38 @@ func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type OperationErrorErrorsErrorDetails struct { + ErrorInfo *ErrorInfo `json:"errorInfo,omitempty"` + + Help *Help `json:"help,omitempty"` + + LocalizedMessage *LocalizedMessage `json:"localizedMessage,omitempty"` + + QuotaInfo *QuotaExceededInfo `json:"quotaInfo,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ErrorInfo") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ErrorInfo") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationErrorErrorsErrorDetails) MarshalJSON() ([]byte, error) { + type NoMethod OperationErrorErrorsErrorDetails + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type OperationWarnings struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in @@ -31046,6 +32751,9 @@ type OperationWarnings struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
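// --- Editorial sketch, not part of the generated file ---
// A regional notification endpoint asking for a full resync of non-healthy
// backends every 30 minutes, per the ResendInterval constraints documented
// above (600 to 3600 whole seconds, nanos disallowed). The Endpoint field and
// its address are assumptions for illustration only.
var exampleGrpcSettings = &compute.NotificationEndpointGrpcSettings{
	Endpoint:       "notifications.example.com:443", // hypothetical endpoint
	ResendInterval: &compute.Duration{Seconds: 1800},
}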
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -31235,6 +32943,9 @@ type OperationAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -31421,6 +33132,9 @@ type OperationListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -31586,6 +33300,9 @@ type OperationsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -31706,25 +33423,33 @@ type OutlierDetection struct { // ConsecutiveErrors: Number of errors before a host is ejected from the // connection pool. When the backend host is accessed over HTTP, a 5xx - // return code qualifies as an error. Defaults to 5. + // return code qualifies as an error. Defaults to 5. Not supported when + // the backend service is referenced by a URL map that is bound to + // target gRPC proxy that has validateForProxyless field set to true. ConsecutiveErrors int64 `json:"consecutiveErrors,omitempty"` // ConsecutiveGatewayFailure: The number of consecutive gateway failures // (502, 503, 504 status or connection errors that are mapped to one of // those status codes) before a consecutive gateway failure ejection - // occurs. Defaults to 3. + // occurs. Defaults to 3. Not supported when the backend service is + // referenced by a URL map that is bound to target gRPC proxy that has + // validateForProxyless field set to true. ConsecutiveGatewayFailure int64 `json:"consecutiveGatewayFailure,omitempty"` // EnforcingConsecutiveErrors: The percentage chance that a host will be // actually ejected when an outlier status is detected through // consecutive 5xx. This setting can be used to disable ejection or to - // ramp it up slowly. Defaults to 0. + // ramp it up slowly. Defaults to 0. Not supported when the backend + // service is referenced by a URL map that is bound to target gRPC proxy + // that has validateForProxyless field set to true. 
EnforcingConsecutiveErrors int64 `json:"enforcingConsecutiveErrors,omitempty"` // EnforcingConsecutiveGatewayFailure: The percentage chance that a host // will be actually ejected when an outlier status is detected through // consecutive gateway failures. This setting can be used to disable - // ejection or to ramp it up slowly. Defaults to 100. + // ejection or to ramp it up slowly. Defaults to 100. Not supported when + // the backend service is referenced by a URL map that is bound to + // target gRPC proxy that has validateForProxyless field set to true. EnforcingConsecutiveGatewayFailure int64 `json:"enforcingConsecutiveGatewayFailure,omitempty"` // EnforcingSuccessRate: The percentage chance that a host will be @@ -32033,6 +33758,9 @@ type PacketMirroringAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -32298,6 +34026,9 @@ type PacketMirroringListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -32599,6 +34330,9 @@ type PacketMirroringsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -32719,9 +34453,9 @@ type PathMatcher struct { // defaultRouteAction specifies any weightedBackendServices, // defaultService must not be set. Conversely if defaultService is set, // defaultRouteAction cannot contain any weightedBackendServices. Only - // one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps - // for external HTTP(S) load balancers support only the urlRewrite - // action within a path matcher's defaultRouteAction. + // one of defaultRouteAction or defaultUrlRedirect must be set. URL maps + // for Classic external HTTP(S) load balancers only support the + // urlRewrite action within a path matcher's defaultRouteAction. DefaultRouteAction *HttpRouteAction `json:"defaultRouteAction,omitempty"` // DefaultService: The full or partial URL to the BackendService @@ -32824,9 +34558,9 @@ type PathRule struct { // backend. If routeAction specifies any weightedBackendServices, // service must not be set. Conversely if service is set, routeAction // cannot contain any weightedBackendServices. Only one of routeAction - // or urlRedirect must be set. 
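// --- Editorial sketch, not part of the generated file ---
// Because zero values are normally omitted, dialing
// EnforcingConsecutiveGatewayFailure down from its documented default of 100
// to 0 requires ForceSendFields, otherwise the 0 never reaches the server.
var exampleOutlierDetection = &compute.OutlierDetection{
	ConsecutiveErrors:                  3, // documented default is 5
	EnforcingConsecutiveGatewayFailure: 0, // disable gateway-failure ejection
	ForceSendFields:                    []string{"EnforcingConsecutiveGatewayFailure"},
}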
URL maps for external HTTP(S) load - // balancers support only the urlRewrite action within a path rule's - // routeAction. + // or urlRedirect must be set. URL maps for Classic external HTTP(S) + // load balancers only support the urlRewrite action within a path + // rule's routeAction. RouteAction *HttpRouteAction `json:"routeAction,omitempty"` // Service: The full or partial URL of the backend service resource to @@ -33221,6 +34955,16 @@ type Project struct { // the Google Cloud Storage bucket where they are stored. UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` + // VmDnsSetting: [Output Only] Default internal DNS setting used by VMs + // running in this project. + // + // Possible values: + // "GLOBAL_DEFAULT" + // "UNSPECIFIED_VM_DNS_SETTING" + // "ZONAL_DEFAULT" + // "ZONAL_ONLY" + VmDnsSetting string `json:"vmDnsSetting,omitempty"` + // XpnProjectStatus: [Output Only] The role this project has in a shared // VPC configuration. Currently, only projects with the host role, which // is specified by the value HOST, are differentiated. @@ -33607,6 +35351,9 @@ type PublicAdvertisedPrefixListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -33952,6 +35699,9 @@ type PublicDelegatedPrefixAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -34138,6 +35888,9 @@ type PublicDelegatedPrefixListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -34363,6 +36116,9 @@ type PublicDelegatedPrefixesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -34521,8 +36277,12 @@ type Quota struct { // "EXTERNAL_VPN_GATEWAYS" // "FIREWALLS" // "FORWARDING_RULES" + // "GLOBAL_EXTERNAL_MANAGED_BACKEND_SERVICES" // "GLOBAL_EXTERNAL_MANAGED_FORWARDING_RULES" + // "GLOBAL_EXTERNAL_PROXY_LB_BACKEND_SERVICES" // "GLOBAL_INTERNAL_ADDRESSES" + // "GLOBAL_INTERNAL_MANAGED_BACKEND_SERVICES" + // "GLOBAL_INTERNAL_TRAFFIC_DIRECTOR_BACKEND_SERVICES" // "GPUS_ALL_REGIONS" // "HEALTH_CHECKS" // "IMAGES" @@ -34582,7 +36342,11 @@ type Quota struct { // "PUBLIC_ADVERTISED_PREFIXES" // "PUBLIC_DELEGATED_PREFIXES" // "REGIONAL_AUTOSCALERS" + // "REGIONAL_EXTERNAL_MANAGED_BACKEND_SERVICES" + // "REGIONAL_EXTERNAL_NETWORK_LB_BACKEND_SERVICES" // "REGIONAL_INSTANCE_GROUP_MANAGERS" + // "REGIONAL_INTERNAL_LB_BACKEND_SERVICES" + // "REGIONAL_INTERNAL_MANAGED_BACKEND_SERVICES" // "RESERVATIONS" // "RESOURCE_POLICIES" // "ROUTERS" @@ -34599,6 +36363,7 @@ type Quota struct { // "SSL_CERTIFICATES" // "STATIC_ADDRESSES" // "STATIC_BYOIP_ADDRESSES" + // "STATIC_EXTERNAL_IPV6_ADDRESS_RANGES" // "SUBNETWORKS" // "T2A_CPUS" // "T2D_CPUS" @@ -34661,6 +36426,59 @@ func (s *Quota) UnmarshalJSON(data []byte) error { return nil } +// QuotaExceededInfo: Additional details for quota exceeded error for +// resource quota. +type QuotaExceededInfo struct { + // Dimensions: The map holding related quota dimensions. + Dimensions map[string]string `json:"dimensions,omitempty"` + + // Limit: Current effective quota limit. The limit's unit depends on the + // quota type or metric. + Limit float64 `json:"limit,omitempty"` + + // LimitName: The name of the quota limit. + LimitName string `json:"limitName,omitempty"` + + // MetricName: The Compute Engine quota metric name. + MetricName string `json:"metricName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Dimensions") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Dimensions") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *QuotaExceededInfo) MarshalJSON() ([]byte, error) { + type NoMethod QuotaExceededInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *QuotaExceededInfo) UnmarshalJSON(data []byte) error { + type NoMethod QuotaExceededInfo + var s1 struct { + Limit gensupport.JSONFloat64 `json:"limit"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Limit = float64(s1.Limit) + return nil +} + // Reference: Represents a reference to a resource. type Reference struct { // Kind: [Output Only] Type of the resource. 
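// --- Editorial sketch, not part of the generated file ---
// Surfaces the structured quota details that the ErrorDetails and
// QuotaExceededInfo types added above attach to failed operations. Assumes a
// "log" import; Operation and its Error field are defined elsewhere in this
// file.
func logQuotaErrors(op *compute.Operation) {
	if op.Error == nil {
		return
	}
	for _, e := range op.Error.Errors {
		for _, d := range e.ErrorDetails {
			if d.QuotaInfo == nil {
				continue // this detail carries ErrorInfo/Help/LocalizedMessage instead
			}
			log.Printf("quota %q (metric %s) exceeded: limit %.0f, dimensions %v",
				d.QuotaInfo.LimitName, d.QuotaInfo.MetricName, d.QuotaInfo.Limit, d.QuotaInfo.Dimensions)
		}
	}
}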
Always compute#reference @@ -34851,6 +36669,9 @@ type RegionAutoscalerListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -35037,6 +36858,9 @@ type RegionDiskTypeListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -35307,6 +37131,9 @@ type RegionInstanceGroupListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -35526,6 +37353,9 @@ type RegionInstanceGroupManagerListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -35970,6 +37800,9 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -36287,6 +38120,9 @@ type RegionInstanceGroupsListInstancesWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -36547,6 +38383,9 @@ type RegionListWarning struct { // overridden. Deprecated unused field. 
// "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -36810,8 +38649,7 @@ func (s *RegionSetPolicyRequest) MarshalJSON() ([]byte, error) { type RegionTargetHttpsProxiesSetSslCertificatesRequest struct { // SslCertificates: New set of SslCertificate resources to associate - // with this TargetHttpsProxy resource. Currently exactly one - // SslCertificate resource must be specified. + // with this TargetHttpsProxy resource. SslCertificates []string `json:"sslCertificates,omitempty"` // ForceSendFields is a list of field names (e.g. "SslCertificates") to @@ -36872,7 +38710,10 @@ func (s *RegionUrlMapsValidateRequest) MarshalJSON() ([]byte, error) { // authority header is suffixed with -shadow. type RequestMirrorPolicy struct { // BackendService: The full or partial URL to the BackendService - // resource being mirrored to. + // resource being mirrored to. The backend service configured for a + // mirroring policy must reference backends that are of the same type as + // the original backend service matched in the URL map. Serverless NEG + // backends are not currently supported as a mirrored backend service. BackendService string `json:"backendService,omitempty"` // ForceSendFields is a list of field names (e.g. "BackendService") to @@ -36940,7 +38781,10 @@ type Reservation struct { // resource. SelfLink string `json:"selfLink,omitempty"` - // ShareSettings: Share-settings for shared-reservation + // ShareSettings: Specify share-settings to create a shared reservation. + // This property is optional. For more information about the syntax and + // options for this field and its subfields, see the guide for creating + // a shared reservation. ShareSettings *ShareSettings `json:"shareSettings,omitempty"` // SpecificReservation: Reservation for instances with specific machine @@ -37128,6 +38972,9 @@ type ReservationAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -37313,6 +39160,9 @@ type ReservationListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -37506,6 +39356,9 @@ type ReservationsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -37629,7 +39482,7 @@ type ResourceCommitment struct { Amount int64 `json:"amount,omitempty,string"` // Type: Type of resource for which this commitment applies. Possible - // values are VCPU and MEMORY + // values are VCPU, MEMORY, LOCAL_SSD, and ACCELERATOR. // // Possible values: // "ACCELERATOR" @@ -37747,6 +39600,9 @@ type ResourcePoliciesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -38027,6 +39883,9 @@ type ResourcePolicyAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -38416,6 +40275,9 @@ type ResourcePolicyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -38824,6 +40686,37 @@ func (s *ResourcePolicyWeeklyCycleDayOfWeek) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ResourceStatus: Contains output only fields. Use this sub-message for +// actual values set on Instance attributes as compared to the value +// requested by the user (intent) in their instance CRUD calls. +type ResourceStatus struct { + // PhysicalHost: [Output Only] An opaque ID of the host on which the VM + // is running. + PhysicalHost string `json:"physicalHost,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PhysicalHost") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PhysicalHost") to include + // in API requests with the JSON null value. 
By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResourceStatus) MarshalJSON() ([]byte, error) { + type NoMethod ResourceStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Route: Represents a Route resource. A route defines a path from VM // instances in the VPC network to a specific destination. This // destination can be inside or outside the VPC network. For more @@ -38995,6 +40888,9 @@ type RouteWarnings struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -39223,6 +41119,9 @@ type RouteListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -39353,8 +41252,7 @@ type Router struct { Description string `json:"description,omitempty"` // EncryptedInterconnectRouter: Indicates if a router is dedicated for - // use with encrypted VLAN attachments (interconnectAttachments). Not - // currently available publicly. + // use with encrypted VLAN attachments (interconnectAttachments). EncryptedInterconnectRouter bool `json:"encryptedInterconnectRouter,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -39370,6 +41268,9 @@ type Router struct { // routers. Kind string `json:"kind,omitempty"` + // Md5AuthenticationKeys: Keys used for MD5 authentication. + Md5AuthenticationKeys []*RouterMd5AuthenticationKey `json:"md5AuthenticationKeys,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -39533,6 +41434,9 @@ type RouterAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -39794,6 +41698,11 @@ type RouterBgpPeer struct { // and managed by user. 
ManagementType string `json:"managementType,omitempty"` + // Md5AuthenticationKeyName: Present if MD5 authentication is enabled + // for the peering. Must be the name of one of the entries in the + // Router.md5_authentication_keys. The field must comply with RFC1035. + Md5AuthenticationKeyName string `json:"md5AuthenticationKeyName,omitempty"` + // Name: Name of this BGP peer. The name must be 1-63 characters long, // and comply with RFC1035. Specifically, the name must be 1-63 // characters long and match the regular expression @@ -40078,6 +41987,9 @@ type RouterListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -40187,6 +42099,41 @@ func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type RouterMd5AuthenticationKey struct { + // Key: [Input only] Value of the key. For patch and update calls, it + // can be skipped to copy the value from the previous configuration. + // This is allowed if the key with the same name existed before the + // operation. Maximum length is 80 characters. Can only contain + // printable ASCII characters. + Key string `json:"key,omitempty"` + + // Name: Name used to identify the key. Must be unique within a router. + // Must be referenced by at least one bgpPeer. Must comply with RFC1035. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouterMd5AuthenticationKey) MarshalJSON() ([]byte, error) { + type NoMethod RouterMd5AuthenticationKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // RouterNat: Represents a Nat resource. It enables the VMs within the // specified subnetworks to access Internet without external IP // addresses. It specifies a list of subnetworks (and the ranges within) @@ -40553,12 +42500,23 @@ type RouterStatusBgpPeerStatus struct { BfdStatus *BfdStatus `json:"bfdStatus,omitempty"` + // EnableIpv6: Enable IPv6 traffic over BGP Peer. If not specified, it + // is disabled by default. + EnableIpv6 bool `json:"enableIpv6,omitempty"` + // IpAddress: IP address of the local BGP interface. 
IpAddress string `json:"ipAddress,omitempty"` + // Ipv6NexthopAddress: IPv6 address of the local BGP interface. + Ipv6NexthopAddress string `json:"ipv6NexthopAddress,omitempty"` + // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` + // Md5AuthEnabled: Informs whether MD5 authentication is enabled on this + // BGP peer. + Md5AuthEnabled bool `json:"md5AuthEnabled,omitempty"` + // Name: Name of this BGP peer. Unique within the Routers resource. Name string `json:"name,omitempty"` @@ -40568,6 +42526,9 @@ type RouterStatusBgpPeerStatus struct { // PeerIpAddress: IP address of the remote BGP interface. PeerIpAddress string `json:"peerIpAddress,omitempty"` + // PeerIpv6NexthopAddress: IPv6 address of the remote BGP interface. + PeerIpv6NexthopAddress string `json:"peerIpv6NexthopAddress,omitempty"` + // RouterApplianceInstance: [Output only] URI of the VM instance that is // used as third-party router appliances such as Next Gen Firewalls, // Virtual Routers, or Router Appliances. The VM instance is the peer @@ -40586,6 +42547,15 @@ type RouterStatusBgpPeerStatus struct { // "UP" Status string `json:"status,omitempty"` + // StatusReason: Indicates why particular status was returned. + // + // Possible values: + // "MD5_AUTH_INTERNAL_PROBLEM" - Indicates internal problems with + // configuration of MD5 authentication. This particular reason can only + // be returned when md5AuthEnabled is true and status is DOWN. + // "STATUS_REASON_UNSPECIFIED" + StatusReason string `json:"statusReason,omitempty"` + // Uptime: Time this session has been up. Format: 14 years, 51 weeks, 6 // days, 23 hours, 59 minutes, 59 seconds Uptime string `json:"uptime,omitempty"` @@ -40844,6 +42814,9 @@ type RoutersScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -41009,32 +42982,45 @@ func (s *Rule) MarshalJSON() ([]byte, error) { } type SSLHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 443. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, SSL health check follows behavior specified in port and - // portName fields. 
+ // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ProxyHeader: Specifies the type of proxy header to append before @@ -41046,15 +43032,17 @@ type SSLHealthCheck struct { // "PROXY_V1" ProxyHeader string `json:"proxyHeader,omitempty"` - // Request: The application data to send once the SSL connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. + // Request: Instructs the health check prober to send this exact ASCII + // string, up to 1024 bytes in length, after establishing the TCP + // connection and SSL handshake. Request string `json:"request,omitempty"` - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. + // Response: Creates a content-based SSL health check. 
In addition to + // establishing a TCP connection and the TLS handshake, you can + // configure the health check to pass only when the backend sends this + // exact response ASCII string, up to 1024 bytes in length. For details, + // see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-ssl-tcp Response string `json:"response,omitempty"` // ForceSendFields is a list of field names (e.g. "Port") to @@ -41192,6 +43180,15 @@ func (s *SavedAttachedDisk) MarshalJSON() ([]byte, error) { // SavedDisk: An instance-attached disk resource. type SavedDisk struct { + // Architecture: [Output Only] The architecture of the attached disk. + // + // Possible values: + // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture + // is not set. + // "ARM64" - Machines with architecture ARM64 + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + // Kind: [Output Only] Type of the resource. Always compute#savedDisk // for attached disks. Kind string `json:"kind,omitempty"` @@ -41215,7 +43212,7 @@ type SavedDisk struct { // "UP_TO_DATE" StorageBytesStatus string `json:"storageBytesStatus,omitempty"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // ForceSendFields is a list of field names (e.g. "Architecture") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -41223,10 +43220,10 @@ type SavedDisk struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Architecture") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -41285,7 +43282,7 @@ func (s *ScalingScheduleStatus) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Scheduling: Sets the scheduling options for an Instance. NextID: 21 +// Scheduling: Sets the scheduling options for an Instance. type Scheduling struct { // AutomaticRestart: Specifies whether the instance should be // automatically restarted if it is terminated by Compute Engine (not @@ -41532,6 +43529,9 @@ type SecurityPoliciesAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -41729,6 +43729,9 @@ type SecurityPoliciesScopedListWarning struct { // overridden. 
Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -41918,9 +43921,11 @@ type SecurityPolicy struct { Region string `json:"region,omitempty"` // Rules: A list of rules that belong to this policy. There must always - // be a default rule (rule with priority 2147483647 and match "*"). If - // no rules are provided when creating a security policy, a default rule - // with action "allow" will be added. + // be a default rule which is a rule with priority 2147483647 and match + // all condition (for the match condition this means match "*" for + // srcIpRanges and for the networkMatch condition every field must be + // either match "*" or not set). If no rules are provided when creating + // a security policy, a default rule with action "allow" will be added. Rules []*SecurityPolicyRule `json:"rules,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -42045,6 +44050,10 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig) MarshalJ } type SecurityPolicyAdvancedOptionsConfig struct { + // JsonCustomConfig: Custom configuration to apply the JSON parsing. + // Only applicable when json_parsing is set to STANDARD. + JsonCustomConfig *SecurityPolicyAdvancedOptionsConfigJsonCustomConfig `json:"jsonCustomConfig,omitempty"` + // Possible values: // "DISABLED" // "STANDARD" @@ -42055,7 +44064,7 @@ type SecurityPolicyAdvancedOptionsConfig struct { // "VERBOSE" LogLevel string `json:"logLevel,omitempty"` - // ForceSendFields is a list of field names (e.g. "JsonParsing") to + // ForceSendFields is a list of field names (e.g. "JsonCustomConfig") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -42063,7 +44072,40 @@ type SecurityPolicyAdvancedOptionsConfig struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "JsonParsing") to include + // NullFields is a list of field names (e.g. "JsonCustomConfig") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyAdvancedOptionsConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdvancedOptionsConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SecurityPolicyAdvancedOptionsConfigJsonCustomConfig struct { + // ContentTypes: A list of custom Content-Type header values to apply + // the JSON parsing. 
As per RFC 1341, a Content-Type header value has + // the following format: Content-Type := type "/" subtype *[";" + // parameter] When configuring a custom Content-Type header value, only + // the type/subtype needs to be specified, and the parameters should be + // excluded. + ContentTypes []string `json:"contentTypes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ContentTypes") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ContentTypes") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -42072,8 +44114,8 @@ type SecurityPolicyAdvancedOptionsConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyAdvancedOptionsConfig) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyAdvancedOptionsConfig +func (s *SecurityPolicyAdvancedOptionsConfigJsonCustomConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdvancedOptionsConfigJsonCustomConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -42182,6 +44224,9 @@ type SecurityPolicyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -42584,26 +44629,34 @@ type SecurityPolicyRuleRateLimitOptions struct { // EnforceOnKey: Determines the key to enforce the rate_limit_threshold // on. Possible values are: - ALL: A single rate limit threshold is // applied to all the requests matching this rule. This is the default - // value if this field 'enforce_on_key' is not configured. - IP: The - // source IP address of the request is the key. Each IP has this limit - // enforced separately. - HTTP_HEADER: The value of the HTTP header - // whose name is configured under "enforce_on_key_name". The key value - // is truncated to the first 128 bytes of the header value. If no such - // header is present in the request, the key type defaults to ALL. - - // XFF_IP: The first IP address (i.e. the originating client IP address) - // specified in the list of IPs under X-Forwarded-For HTTP header. If no - // such header is present or the value is not a valid IP, the key - // defaults to the source IP address of the request i.e. key type IP. - - // HTTP_COOKIE: The value of the HTTP cookie whose name is configured - // under "enforce_on_key_name". The key value is truncated to the first - // 128 bytes of the cookie value. If no such cookie is present in the - // request, the key type defaults to ALL. + // value if "enforceOnKey" is not configured. - IP: The source IP + // address of the request is the key. 
Each IP has this limit enforced + // separately. - HTTP_HEADER: The value of the HTTP header whose name is + // configured under "enforceOnKeyName". The key value is truncated to + // the first 128 bytes of the header value. If no such header is present + // in the request, the key type defaults to ALL. - XFF_IP: The first IP + // address (i.e. the originating client IP address) specified in the + // list of IPs under X-Forwarded-For HTTP header. If no such header is + // present or the value is not a valid IP, the key defaults to the + // source IP address of the request i.e. key type IP. - HTTP_COOKIE: The + // value of the HTTP cookie whose name is configured under + // "enforceOnKeyName". The key value is truncated to the first 128 bytes + // of the cookie value. If no such cookie is present in the request, the + // key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP + // request. The key value is truncated to the first 128 bytes. - SNI: + // Server name indication in the TLS session of the HTTPS request. The + // key value is truncated to the first 128 bytes. The key type defaults + // to ALL on a HTTP session. - REGION_CODE: The country/region from + // which the request originates. // // Possible values: // "ALL" // "HTTP_COOKIE" // "HTTP_HEADER" + // "HTTP_PATH" // "IP" + // "REGION_CODE" + // "SNI" // "XFF_IP" EnforceOnKey string `json:"enforceOnKey,omitempty"` @@ -43098,6 +45151,9 @@ type ServiceAttachmentAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -43222,6 +45278,9 @@ type ServiceAttachmentConnectedEndpoint struct { // Possible values: // "ACCEPTED" - The connection has been accepted by the producer. // "CLOSED" - The connection has been closed by the producer. + // "NEEDS_ATTENTION" - The connection has been accepted by the + // producer, but the producer needs to take further action before the + // forwarding rule can serve traffic. // "PENDING" - The connection is pending acceptance by the producer. // "REJECTED" - The consumer is still connected but not using the // connection. @@ -43255,6 +45314,9 @@ type ServiceAttachmentConsumerProjectLimit struct { // ConnectionLimit: The value of the limit to set. ConnectionLimit int64 `json:"connectionLimit,omitempty"` + // NetworkUrl: The network URL for the network to set the limit for. + NetworkUrl string `json:"networkUrl,omitempty"` + // ProjectIdOrNum: The project id or number for the project to set the // limit for. ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` @@ -43360,6 +45422,9 @@ type ServiceAttachmentListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -43526,6 +45591,9 @@ type ServiceAttachmentsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -43646,6 +45714,7 @@ type ShareSettings struct { // // Possible values: // "LOCAL" - Default value. + // "ORGANIZATION" - Shared-reservation is open to entire Organization // "SHARE_TYPE_UNSPECIFIED" - Default value. This value is unused. // "SPECIFIC_PROJECTS" - Shared-reservation is open to specific // projects @@ -43890,6 +45959,16 @@ func (s *SignedUrlKey) MarshalJSON() ([]byte, error) { // snapshots to back up data on a regular interval. For more // information, read Creating persistent disk snapshots. type Snapshot struct { + // Architecture: [Output Only] The architecture of the snapshot. Valid + // values are ARM64 or X86_64. + // + // Possible values: + // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture + // is not set. + // "ARM64" - Machines with architecture ARM64 + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + // AutoCreated: [Output Only] Set to true if snapshots are automatically // created by applying resource policy on the target disk. AutoCreated bool `json:"autoCreated,omitempty"` @@ -43902,6 +45981,10 @@ type Snapshot struct { // resource, this field is visible only if it has a non-empty value. ChainName string `json:"chainName,omitempty"` + // CreationSizeBytes: [Output Only] Size in bytes of the snapshot at + // creation time. + CreationSizeBytes int64 `json:"creationSizeBytes,omitempty,string"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -43980,6 +46063,13 @@ type Snapshot struct { // use the snapshot later. SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` + // SnapshotType: Indicates the type of the snapshot. + // + // Possible values: + // "ARCHIVE" + // "STANDARD" + SnapshotType string `json:"snapshotType,omitempty"` + // SourceDisk: The source disk used to create this snapshot. SourceDisk string `json:"sourceDisk,omitempty"` @@ -43994,6 +46084,14 @@ type Snapshot struct { // disk name. SourceDiskId string `json:"sourceDiskId,omitempty"` + // SourceSnapshotSchedulePolicy: [Output Only] URL of the resource + // policy which created this scheduled snapshot. + SourceSnapshotSchedulePolicy string `json:"sourceSnapshotSchedulePolicy,omitempty"` + + // SourceSnapshotSchedulePolicyId: [Output Only] ID of the resource + // policy which created this scheduled snapshot. + SourceSnapshotSchedulePolicyId string `json:"sourceSnapshotSchedulePolicyId,omitempty"` + // Status: [Output Only] The status of the snapshot. This can be // CREATING, DELETING, FAILED, READY, or UPLOADING. // @@ -44029,7 +46127,7 @@ type Snapshot struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. 
"AutoCreated") to + // ForceSendFields is a list of field names (e.g. "Architecture") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -44037,7 +46135,7 @@ type Snapshot struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoCreated") to include + // NullFields is a list of field names (e.g. "Architecture") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -44128,6 +46226,9 @@ type SnapshotListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -44337,6 +46438,18 @@ type SourceInstanceProperties struct { // to use for instances created from this machine image. GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` + // KeyRevocationActionType: KeyRevocationActionType of the instance. + // Supported options are "STOP" and "NONE". The default value is "NONE" + // if it is not specified. + // + // Possible values: + // "KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED" - Default value. This + // value is unused. + // "NONE" - Indicates user chose no operation. + // "STOP" - Indicates user chose to opt for VM shutdown on key + // revocation. + KeyRevocationActionType string `json:"keyRevocationActionType,omitempty"` + // Labels: Labels to apply to instances that are created from this // machine image. Labels map[string]string `json:"labels,omitempty"` @@ -44591,6 +46704,9 @@ type SslCertificateAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -44777,6 +46893,9 @@ type SslCertificateListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -45037,6 +47156,9 @@ type SslCertificatesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -45146,16 +47268,18 @@ func (s *SslCertificatesScopedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesList struct { +type SslPoliciesAggregatedList struct { + Etag string `json:"etag,omitempty"` + // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of SslPolicy resources. - Items []*SslPolicy `json:"items,omitempty"` + // Items: A list of SslPoliciesScopedList resources. + Items map[string]SslPoliciesScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#sslPoliciesList for lists of sslPolicies. + // Kind: [Output Only] Type of resource. Always + // compute#sslPolicyAggregatedList for lists of SSL Policies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -45169,14 +47293,17 @@ type SslPoliciesList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *SslPoliciesListWarning `json:"warning,omitempty"` + Warning *SslPoliciesAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -45184,7 +47311,7 @@ type SslPoliciesList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Etag") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -45193,14 +47320,15 @@ type SslPoliciesList struct { NullFields []string `json:"-"` } -func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesList +func (s *SslPoliciesAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslPoliciesListWarning: [Output Only] Informational warning message. -type SslPoliciesListWarning struct { +// SslPoliciesAggregatedListWarning: [Output Only] Informational warning +// message. +type SslPoliciesAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. 
@@ -45222,6 +47350,9 @@ type SslPoliciesListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -45265,7 +47396,7 @@ type SslPoliciesListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*SslPoliciesListWarningData `json:"data,omitempty"` + Data []*SslPoliciesAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -45288,13 +47419,13 @@ type SslPoliciesListWarning struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListWarning +func (s *SslPoliciesAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesListWarningData struct { +type SslPoliciesAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -45325,156 +47456,67 @@ type SslPoliciesListWarningData struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListWarningData +func (s *SslPoliciesAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesListAvailableFeaturesResponse struct { - Features []string `json:"features,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the +type SslPoliciesList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Features") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Features") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListAvailableFeaturesResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SslPolicy: Represents an SSL Policy resource. Use SSL policies to -// control the SSL features, such as versions and cipher suites, offered -// by an HTTPS or SSL Proxy load balancer. For more information, read -// SSL Policy Concepts. -type SslPolicy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // CustomFeatures: A list of features enabled when the selected profile - // is CUSTOM. The method returns the set of features that can be - // specified in this list. This field must be empty if the profile is - // not CUSTOM. - CustomFeatures []string `json:"customFeatures,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // EnabledFeatures: [Output Only] The list of features enabled in the - // SSL policy. - EnabledFeatures []string `json:"enabledFeatures,omitempty"` - - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a SslPolicy. An up-to-date - // fingerprint must be provided in order to update the SslPolicy, - // otherwise the request will fail with error 412 conditionNotMet. To - // see the latest fingerprint, make a get() request to retrieve an - // SslPolicy. - Fingerprint string `json:"fingerprint,omitempty"` + Id string `json:"id,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // Items: A list of SslPolicy resources. + Items []*SslPolicy `json:"items,omitempty"` - // Kind: [Output only] Type of the resource. Always compute#sslPolicyfor - // SSL policies. + // Kind: [Output Only] Type of the resource. Always + // compute#sslPoliciesList for lists of sslPolicies. Kind string `json:"kind,omitempty"` - // MinTlsVersion: The minimum version of SSL protocol that can be used - // by the clients to establish a connection with the load balancer. This - // can be one of TLS_1_0, TLS_1_1, TLS_1_2. - // - // Possible values: - // "TLS_1_0" - TLS 1.0 - // "TLS_1_1" - TLS 1.1 - // "TLS_1_2" - TLS 1.2 - MinTlsVersion string `json:"minTlsVersion,omitempty"` - - // Name: Name of the resource. The name must be 1-63 characters long, - // and comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` - - // Profile: Profile specifies the set of SSL features that can be used - // by the load balancer when negotiating SSL with clients. This can be - // one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, - // the set of SSL features to enable must be specified in the - // customFeatures field. - // - // Possible values: - // "COMPATIBLE" - Compatible profile. 
Allows the broadset set of - // clients, even those which support only out-of-date SSL features to - // negotiate with the load balancer. - // "CUSTOM" - Custom profile. Allow only the set of allowed SSL - // features specified in the customFeatures field. - // "MODERN" - Modern profile. Supports a wide set of SSL features, - // allowing modern clients to negotiate SSL with the load balancer. - // "RESTRICTED" - Restricted profile. Supports a reduced set of SSL - // features, intended to meet stricter compliance requirements. - Profile string `json:"profile,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Warnings: [Output Only] If potential misconfigurations are detected - // for this SSL policy, this field will be populated with warning - // messages. - Warnings []*SslPolicyWarnings `json:"warnings,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SslPoliciesListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SslPolicy) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicy +func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPolicyWarnings struct { +// SslPoliciesListWarning: [Output Only] Informational warning message. 
+type SslPoliciesListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -45496,6 +47538,457 @@ type SslPolicyWarnings struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*SslPoliciesListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPoliciesListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPoliciesListAvailableFeaturesResponse struct { + Features []string `json:"features,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Features") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Features") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListAvailableFeaturesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPoliciesScopedList struct { + // SslPolicies: A list of SslPolicies contained in this scope. + SslPolicies []*SslPolicy `json:"sslPolicies,omitempty"` + + // Warning: Informational warning which replaces the list of SSL + // policies when the list is empty. + Warning *SslPoliciesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslPolicies") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslPolicies") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SslPoliciesScopedListWarning: Informational warning which replaces +// the list of SSL policies when the list is empty. +type SslPoliciesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. 
+ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*SslPoliciesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPoliciesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SslPolicy: Represents an SSL Policy resource. Use SSL policies to +// control the SSL features, such as versions and cipher suites, offered +// by an HTTPS or SSL Proxy load balancer. For more information, read +// SSL Policy Concepts. +type SslPolicy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // CustomFeatures: A list of features enabled when the selected profile + // is CUSTOM. The method returns the set of features that can be + // specified in this list. This field must be empty if the profile is + // not CUSTOM. + CustomFeatures []string `json:"customFeatures,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // EnabledFeatures: [Output Only] The list of features enabled in the + // SSL policy. + EnabledFeatures []string `json:"enabledFeatures,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a SslPolicy. An up-to-date + // fingerprint must be provided in order to update the SslPolicy, + // otherwise the request will fail with error 412 conditionNotMet. To + // see the latest fingerprint, make a get() request to retrieve an + // SslPolicy. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output only] Type of the resource. Always compute#sslPolicyfor + // SSL policies. 
+ Kind string `json:"kind,omitempty"` + + // MinTlsVersion: The minimum version of SSL protocol that can be used + // by the clients to establish a connection with the load balancer. This + // can be one of TLS_1_0, TLS_1_1, TLS_1_2. + // + // Possible values: + // "TLS_1_0" - TLS 1.0 + // "TLS_1_1" - TLS 1.1 + // "TLS_1_2" - TLS 1.2 + MinTlsVersion string `json:"minTlsVersion,omitempty"` + + // Name: Name of the resource. The name must be 1-63 characters long, + // and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + // Profile: Profile specifies the set of SSL features that can be used + // by the load balancer when negotiating SSL with clients. This can be + // one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, + // the set of SSL features to enable must be specified in the + // customFeatures field. + // + // Possible values: + // "COMPATIBLE" - Compatible profile. Allows the broadset set of + // clients, even those which support only out-of-date SSL features to + // negotiate with the load balancer. + // "CUSTOM" - Custom profile. Allow only the set of allowed SSL + // features specified in the customFeatures field. + // "MODERN" - Modern profile. Supports a wide set of SSL features, + // allowing modern clients to negotiate SSL with the load balancer. + // "RESTRICTED" - Restricted profile. Supports a reduced set of SSL + // features, intended to meet stricter compliance requirements. + Profile string `json:"profile,omitempty"` + + // Region: [Output Only] URL of the region where the regional SSL policy + // resides. This field is not applicable to global SSL policies. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warnings: [Output Only] If potential misconfigurations are detected + // for this SSL policy, this field will be populated with warning + // messages. + Warnings []*SslPolicyWarnings `json:"warnings,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *SslPolicy) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPolicyWarnings struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -45750,8 +48243,8 @@ type Subnetwork struct { // INTERNAL_HTTPS_LOAD_BALANCER. EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` - // ExternalIpv6Prefix: [Output Only] The external IPv6 address range - // that is assigned to this subnetwork. + // ExternalIpv6Prefix: The external IPv6 address range that is owned by + // this subnetwork. ExternalIpv6Prefix string `json:"externalIpv6Prefix,omitempty"` // Fingerprint: Fingerprint of this resource. A hash of the contents @@ -46018,6 +48511,9 @@ type SubnetworkAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -46204,6 +48700,9 @@ type SubnetworkListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -46529,6 +49028,9 @@ type SubnetworksScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -46711,32 +49213,45 @@ func (s *Subsetting) MarshalJSON() ([]byte, error) { } type TCPHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 80. Valid values are 1 through 65535. + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 80. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, TCP health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. 
Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ProxyHeader: Specifies the type of proxy header to append before @@ -46748,15 +49263,16 @@ type TCPHealthCheck struct { // "PROXY_V1" ProxyHeader string `json:"proxyHeader,omitempty"` - // Request: The application data to send once the TCP connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. + // Request: Instructs the health check prober to send this exact ASCII + // string, up to 1024 bytes in length, after establishing the TCP + // connection. Request string `json:"request,omitempty"` - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. + // Response: Creates a content-based TCP health check. In addition to + // establishing a TCP connection, you can configure the health check to + // pass only when the backend sends this exact response ASCII string, up + // to 1024 bytes in length. For details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-ssl-tcp Response string `json:"response,omitempty"` // ForceSendFields is a list of field names (e.g. "Port") to @@ -46990,6 +49506,9 @@ type TargetGrpcProxyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -47156,6 +49675,9 @@ type TargetHttpProxiesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -47493,6 +50015,9 @@ type TargetHttpProxyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -47659,6 +50184,9 @@ type TargetHttpsProxiesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -48099,6 +50627,9 @@ type TargetHttpsProxyAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -48286,6 +50817,9 @@ type TargetHttpsProxyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -48435,8 +50969,9 @@ type TargetInstance struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // NatPolicy: NAT option controlling how IPs are NAT'ed to the instance. - // Currently only NO_NAT (default value) is supported. + // NatPolicy: Must have a value of NO_NAT. Protocol forwarding delivers + // packets while preserving the destination IP address of the forwarding + // rule referencing the target instance. // // Possible values: // "NO_NAT" - No NAT performed. @@ -48562,6 +51097,9 @@ type TargetInstanceAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -48748,6 +51286,9 @@ type TargetInstanceListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -48913,6 +51454,9 @@ type TargetInstancesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -49259,6 +51803,9 @@ type TargetPoolAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -49480,6 +52027,9 @@ type TargetPoolListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -49763,6 +52313,9 @@ type TargetPoolsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -50186,6 +52739,9 @@ type TargetSslProxyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -50295,6 +52851,174 @@ func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type TargetTcpProxiesScopedList struct { + // TargetTcpProxies: A list of TargetTcpProxies contained in this scope. + TargetTcpProxies []*TargetTcpProxy `json:"targetTcpProxies,omitempty"` + + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. 
+ Warning *TargetTcpProxiesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetTcpProxies") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetTcpProxies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxiesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type TargetTcpProxiesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. 
+ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*TargetTcpProxiesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxiesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type TargetTcpProxiesSetBackendServiceRequest struct { // Service: The URL of the new BackendService resource for the // targetTcpProxy. @@ -50406,6 +53130,10 @@ type TargetTcpProxy struct { // "PROXY_V1" ProxyHeader string `json:"proxyHeader,omitempty"` + // Region: [Output Only] URL of the region where the regional TCP proxy + // resides. This field is not applicable to global TCP proxy. + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -50440,16 +53168,16 @@ func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. -type TargetTcpProxyList struct { +type TargetTcpProxyAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetTcpProxy resources. - Items []*TargetTcpProxy `json:"items,omitempty"` + // Items: A list of TargetTcpProxiesScopedList resources. + Items map[string]TargetTcpProxiesScopedList `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource. Always + // compute#targetTcpProxyAggregatedList for lists of Target TCP Proxies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -50463,8 +53191,11 @@ type TargetTcpProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` + Warning *TargetTcpProxyAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. 
@@ -50487,15 +53218,15 @@ type TargetTcpProxyList struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyList +func (s *TargetTcpProxyAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxyListWarning: [Output Only] Informational warning -// message. -type TargetTcpProxyListWarning struct { +// TargetTcpProxyAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetTcpProxyAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -50517,6 +53248,9 @@ type TargetTcpProxyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -50560,7 +53294,7 @@ type TargetTcpProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` + Data []*TargetTcpProxyAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -50583,13 +53317,13 @@ type TargetTcpProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyListWarning +func (s *TargetTcpProxyAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetTcpProxyListWarningData struct { +type TargetTcpProxyAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -50620,111 +53354,22 @@ type TargetTcpProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyListWarningData +func (s *TargetTcpProxyAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGateway: Represents a Target VPN Gateway resource. The -// target VPN gateway resource represents a Classic Cloud VPN gateway. -// For more information, read the the Cloud VPN Overview. -type TargetVpnGateway struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. 
- Description string `json:"description,omitempty"` - - // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule - // resources. ForwardingRules are created using - // compute.forwardingRules.insert and associated with a VPN gateway. - ForwardingRules []string `json:"forwardingRules,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: URL of the network to which this VPN gateway is attached. - // Provided by the client when the VPN gateway is created. - Network string `json:"network,omitempty"` - - // Region: [Output Only] URL of the region where the target VPN gateway - // resides. You must specify this field as part of the HTTP request URL. - // It is not settable as a field in the request body. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Status: [Output Only] The status of the VPN gateway, which can be one - // of the following: CREATING, READY, FAILED, or DELETING. - // - // Possible values: - // "CREATING" - // "DELETING" - // "FAILED" - // "READY" - Status string `json:"status,omitempty"` - - // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. - // VpnTunnels are created using the compute.vpntunnels.insert method and - // associated with a VPN gateway. - Tunnels []string `json:"tunnels,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGateway - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetVpnGatewayAggregatedList struct { +// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. 
+type TargetTcpProxyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetVpnGateway resources. - Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` + // Items: A list of TargetTcpProxy resources. + Items []*TargetTcpProxy `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -50738,11 +53383,289 @@ type TargetVpnGatewayAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` + Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxyListWarning: [Output Only] Informational warning +// message. +type TargetTcpProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGateway: Represents a Target VPN Gateway resource. The +// target VPN gateway resource represents a Classic Cloud VPN gateway. +// For more information, read the the Cloud VPN Overview. +type TargetVpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule + // resources. ForwardingRules are created using + // compute.forwardingRules.insert and associated with a VPN gateway. + ForwardingRules []string `json:"forwardingRules,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. 
Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: URL of the network to which this VPN gateway is attached. + // Provided by the client when the VPN gateway is created. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URL of the region where the target VPN gateway + // resides. You must specify this field as part of the HTTP request URL. + // It is not settable as a field in the request body. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] The status of the VPN gateway, which can be one + // of the following: CREATING, READY, FAILED, or DELETING. + // + // Possible values: + // "CREATING" + // "DELETING" + // "FAILED" + // "READY" + Status string `json:"status,omitempty"` + + // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. + // VpnTunnels are created using the compute.vpntunnels.insert method and + // associated with a VPN gateway. + Tunnels []string `json:"tunnels,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGateway + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetVpnGateway resources. + Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. 
+ NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -50795,6 +53718,9 @@ type TargetVpnGatewayAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -50982,6 +53908,9 @@ type TargetVpnGatewayListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -51148,6 +54077,9 @@ type TargetVpnGatewaysScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -51433,8 +54365,8 @@ type UrlMap struct { // any weightedBackendServices, defaultService must not be set. // Conversely if defaultService is set, defaultRouteAction cannot // contain any weightedBackendServices. Only one of defaultRouteAction - // or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load - // balancers support only the urlRewrite action within + // or defaultUrlRedirect must be set. URL maps for Classic external + // HTTP(S) load balancers only support the urlRewrite action within // defaultRouteAction. defaultRouteAction has no effect when the URL map // is bound to a target gRPC proxy that has the validateForProxyless // field set to true. @@ -51624,6 +54556,9 @@ type UrlMapListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -51974,6 +54909,9 @@ type UrlMapsAggregatedListWarning struct { // overridden. Deprecated unused field. 
// "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -52138,6 +55076,9 @@ type UrlMapsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -52588,6 +55529,9 @@ type UsableSubnetworksAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -52796,6 +55740,10 @@ type VmEndpointNatMappingsInterfaceNatMappings struct { // field nat_ip_port_ranges. NumTotalNatPorts int64 `json:"numTotalNatPorts,omitempty"` + // RuleMappings: Information about mappings provided by rules in this + // NAT. + RuleMappings []*VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings `json:"ruleMappings,omitempty"` + // SourceAliasIpRange: Alias IP range for this interface endpoint. It // will be a private (RFC 1918) IP range. Examples: "10.33.4.55/32", or // "192.168.5.0/24". @@ -52829,6 +55777,59 @@ func (s *VmEndpointNatMappingsInterfaceNatMappings) MarshalJSON() ([]byte, error return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings: Contains +// information of NAT Mappings provided by a NAT Rule. +type VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings struct { + // DrainNatIpPortRanges: List of all drain IP:port-range mappings + // assigned to this interface by this rule. These ranges are inclusive, + // that is, both the first and the last ports can be used for NAT. + // Example: ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. + DrainNatIpPortRanges []string `json:"drainNatIpPortRanges,omitempty"` + + // NatIpPortRanges: A list of all IP:port-range mappings assigned to + // this interface by this rule. These ranges are inclusive, that is, + // both the first and the last ports can be used for NAT. Example: + // ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. + NatIpPortRanges []string `json:"natIpPortRanges,omitempty"` + + // NumTotalDrainNatPorts: Total number of drain ports across all NAT IPs + // allocated to this interface by this rule. It equals the aggregated + // port number in the field drain_nat_ip_port_ranges. + NumTotalDrainNatPorts int64 `json:"numTotalDrainNatPorts,omitempty"` + + // NumTotalNatPorts: Total number of ports across all NAT IPs allocated + // to this interface by this rule. 
It equals the aggregated port number + // in the field nat_ip_port_ranges. + NumTotalNatPorts int64 `json:"numTotalNatPorts,omitempty"` + + // RuleNumber: Rule number of the NAT Rule. + RuleNumber int64 `json:"ruleNumber,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "DrainNatIpPortRanges") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DrainNatIpPortRanges") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // VmEndpointNatMappingsList: Contains a list of VmEndpointNatMappings. type VmEndpointNatMappingsList struct { // Id: [Output Only] The unique identifier for the resource. This @@ -52909,6 +55910,9 @@ type VmEndpointNatMappingsListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -53077,7 +56081,8 @@ type VpnGateway struct { SelfLink string `json:"selfLink,omitempty"` // StackType: The stack type for this VPN gateway to identify the IP - // protocols that are enabled. If not specified, IPV4_ONLY will be used. + // protocols that are enabled. Possible values are: IPV4_ONLY, + // IPV4_IPV6. If not specified, IPV4_ONLY will be used. // // Possible values: // "IPV4_IPV6" - Enable VPN gateway with both IPv4 and IPv6 protocols. @@ -53196,6 +56201,9 @@ type VpnGatewayAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -53382,6 +56390,9 @@ type VpnGatewayListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -53664,9 +56675,9 @@ type VpnGatewayVpnGatewayInterface struct { // InterconnectAttachment: URL of the VLAN attachment // (interconnectAttachment) resource for this VPN gateway interface. // When the value of this field is present, the VPN gateway is used for - // IPsec-encrypted Cloud Interconnect; all egress or ingress traffic for + // HA VPN over Cloud Interconnect; all egress or ingress traffic for // this VPN gateway interface goes through the specified VLAN attachment - // resource. Not currently available publicly. + // resource. InterconnectAttachment string `json:"interconnectAttachment,omitempty"` // IpAddress: [Output Only] IP address for this VPN interface associated @@ -53674,11 +56685,11 @@ type VpnGatewayVpnGatewayInterface struct { // external IP address or a regional internal IP address. The two IP // addresses for a VPN gateway must be all regional external or regional // internal IP addresses. There cannot be a mix of regional external IP - // addresses and regional internal IP addresses. For IPsec-encrypted - // Cloud Interconnect, the IP addresses for both interfaces could either - // be regional internal IP addresses or regional external IP addresses. - // For regular (non IPsec-encrypted Cloud Interconnect) HA VPN tunnels, - // the IP address must be a regional external IP address. + // addresses and regional internal IP addresses. For HA VPN over Cloud + // Interconnect, the IP addresses for both interfaces could either be + // regional internal IP addresses or regional external IP addresses. For + // regular (non HA VPN over Cloud Interconnect) HA VPN tunnels, the IP + // address must be a regional external IP address. IpAddress string `json:"ipAddress,omitempty"` // ForceSendFields is a list of field names (e.g. "Id") to @@ -53790,6 +56801,9 @@ type VpnGatewaysScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -53950,7 +56964,9 @@ type VpnTunnel struct { // PeerExternalGatewayInterface: The interface ID of the external VPN // gateway to which this VPN tunnel is connected. Provided by the client - // when the VPN tunnel is created. + // when the VPN tunnel is created. Possible values are: `0`, `1`, `2`, + // `3`. The number of IDs in use depends on the external VPN gateway + // redundancy type. PeerExternalGatewayInterface int64 `json:"peerExternalGatewayInterface,omitempty"` // PeerGcpGateway: URL of the peer side HA GCP VPN gateway to which this @@ -54047,7 +57063,7 @@ type VpnTunnel struct { VpnGateway string `json:"vpnGateway,omitempty"` // VpnGatewayInterface: The interface ID of the VPN gateway with which - // this VPN tunnel is associated. + // this VPN tunnel is associated. Possible values are: `0`, `1`. 
VpnGatewayInterface int64 `json:"vpnGatewayInterface,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -54158,6 +57174,9 @@ type VpnTunnelAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -54344,6 +57363,9 @@ type VpnTunnelListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -54508,6 +57530,9 @@ type VpnTunnelsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -54817,6 +57842,9 @@ type XpnHostListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -55116,6 +58144,9 @@ type ZoneListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -55489,17 +58520,17 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AcceleratorTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -55699,17 +58730,17 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AcceleratorType{ ServerResponse: googleapi.ServerResponse{ @@ -55950,17 +58981,17 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AcceleratorTypeList{ ServerResponse: googleapi.ServerResponse{ @@ -56250,17 +59281,17 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AddressAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -56462,17 +59493,17 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -56638,17 +59669,17 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Address{ ServerResponse: googleapi.ServerResponse{ @@ -56816,17 +59847,17 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return 
nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -57066,17 +60097,17 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AddressList{ ServerResponse: googleapi.ServerResponse{ @@ -57176,6 +60207,195 @@ func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) erro } } +// method id "compute.addresses.setLabels": + +type AddressesSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an Address. To learn more about labels, +// read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. +func (r *AddressesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *AddressesSetLabelsCall { + c := &AddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *AddressesSetLabelsCall) RequestId(requestId string) *AddressesSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesSetLabelsCall) Fields(s ...googleapi.Field) *AddressesSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesSetLabelsCall) Context(ctx context.Context) *AddressesSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AddressesSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.addresses.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.autoscalers.aggregatedList": type AutoscalersAggregatedListCall struct { @@ -57366,17 +60586,17 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AutoscalerAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -57578,17 +60798,17 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -57755,17 +60975,17 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Autoscaler{ ServerResponse: googleapi.ServerResponse{ @@ -57933,17 +61153,17 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -58183,17 +61403,17 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ 
Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AutoscalerList{ ServerResponse: googleapi.ServerResponse{ @@ -58410,17 +61630,17 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -58600,17 +61820,17 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -58689,9 +61909,9 @@ type BackendBucketsAddSignedUrlKeyCall struct { // AddSignedUrlKey: Adds a key for validating requests with signed URLs // for this backend bucket. // -// - backendBucket: Name of the BackendBucket resource to which the -// Signed URL Key should be added. The name should conform to RFC1035. -// - project: Project ID for this request. +// - backendBucket: Name of the BackendBucket resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - project: Project ID for this request. func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -58784,17 +62004,17 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -58952,17 +62172,17 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -59031,10 +62251,10 @@ type BackendBucketsDeleteSignedUrlKeyCall struct { // DeleteSignedUrlKey: Deletes a key for validating requests with signed // URLs for this backend bucket. // -// - backendBucket: Name of the BackendBucket resource to which the -// Signed URL Key should be added. The name should conform to RFC1035. 
-// - keyName: The name of the Signed URL Key to delete. -// - project: Project ID for this request. +// - backendBucket: Name of the BackendBucket resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - keyName: The name of the Signed URL Key to delete. +// - project: Project ID for this request. func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -59122,17 +62342,17 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -59293,17 +62513,17 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendBucket{ ServerResponse: googleapi.ServerResponse{ @@ -59459,17 +62679,17 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -59697,17 +62917,17 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendBucketList{ ServerResponse: googleapi.ServerResponse{ @@ -59909,17 +63129,17 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -59992,9 +63212,9 @@ type BackendBucketsSetEdgeSecurityPolicyCall struct { // SetEdgeSecurityPolicy: Sets the edge security policy for the // specified backend bucket. // -// - backendBucket: Name of the BackendService resource to which the -// security policy should be set. The name should conform to RFC1035. 
-// - project: Project ID for this request. +// - backendBucket: Name of the BackendService resource to which the +// security policy should be set. The name should conform to RFC1035. +// - project: Project ID for this request. func (r *BackendBucketsService) SetEdgeSecurityPolicy(project string, backendBucket string, securitypolicyreference *SecurityPolicyReference) *BackendBucketsSetEdgeSecurityPolicyCall { c := &BackendBucketsSetEdgeSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -60087,17 +63307,17 @@ func (c *BackendBucketsSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -60263,17 +63483,17 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -60346,9 +63566,9 @@ type BackendServicesAddSignedUrlKeyCall struct { // AddSignedUrlKey: Adds a key for validating requests with signed URLs // for this backend service. // -// - backendService: Name of the BackendService resource to which the -// Signed URL Key should be added. The name should conform to RFC1035. -// - project: Project ID for this request. +// - backendService: Name of the BackendService resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - project: Project ID for this request. 
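// A minimal sketch for BackendServicesService.AddSignedUrlKey as documented above;
// assumes svc (*compute.Service) and ctx as in the earlier sketches. The key value
// must be a 128-bit value encoded with base64url; the string below is only a
// placeholder.
func addSignedUrlKey(ctx context.Context, svc *compute.Service) error {
	_, err := svc.BackendServices.AddSignedUrlKey("my-project", "my-backend-service",
		&compute.SignedUrlKey{
			KeyName:  "my-key",
			KeyValue: "placeholder-base64url-encoded-128-bit-key",
		}).Context(ctx).Do()
	return err
}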
func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -60441,17 +63661,17 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -60699,17 +63919,17 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendServiceAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -60907,17 +64127,17 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -60986,10 +64206,10 @@ type BackendServicesDeleteSignedUrlKeyCall struct { // DeleteSignedUrlKey: Deletes a key for validating requests with signed // URLs for this backend service. // -// - backendService: Name of the BackendService resource to which the -// Signed URL Key should be added. The name should conform to RFC1035. -// - keyName: The name of the Signed URL Key to delete. -// - project: Project ID for this request. +// - backendService: Name of the BackendService resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - keyName: The name of the Signed URL Key to delete. +// - project: Project ID for this request. 
func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -61077,17 +64297,17 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -61248,17 +64468,17 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ @@ -61325,9 +64545,9 @@ type BackendServicesGetHealthCall struct { // BackendService. Example request body: { "group": // "/zones/us-east1-b/instanceGroups/lb-backend-example" } // -// - backendService: Name of the BackendService resource to which the -// queried instance belongs. -// - project: . +// - backendService: Name of the BackendService resource to which the +// queried instance belongs. +// - project: . func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -61404,17 +64624,17 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendServiceGroupHealth{ ServerResponse: googleapi.ServerResponse{ @@ -61467,6 +64687,180 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen } +// method id "compute.backendServices.getIamPolicy": + +type BackendServicesGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *BackendServicesService) GetIamPolicy(project string, resource string) *BackendServicesGetIamPolicyCall { + c := &BackendServicesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. 
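// A minimal sketch for the new compute.backendServices.getIamPolicy method added in
// this diff; assumes svc (*compute.Service) and ctx as in the earlier sketches.
// Requesting policy version 3 is only needed when conditional role bindings are in use.
func getBackendServicePolicy(ctx context.Context, svc *compute.Service) (*compute.Policy, error) {
	return svc.BackendServices.GetIamPolicy("my-project", "my-backend-service").
		OptionsRequestedPolicyVersion(3).
		Context(ctx).
		Do()
}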
+func (c *BackendServicesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BackendServicesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesGetIamPolicyCall) Fields(s ...googleapi.Field) *BackendServicesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendServicesGetIamPolicyCall) IfNoneMatch(entityTag string) *BackendServicesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesGetIamPolicyCall) Context(ctx context.Context) *BackendServicesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BackendServicesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/global/backendServices/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.backendServices.getIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.backendServices.insert": type BackendServicesInsertCall struct { @@ -61573,17 +64967,17 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -61811,17 +65205,17 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ @@ -62024,17 +65418,17 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer 
googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -62107,10 +65501,10 @@ type BackendServicesSetEdgeSecurityPolicyCall struct { // SetEdgeSecurityPolicy: Sets the edge security policy for the // specified backend service. // -// - backendService: Name of the BackendService resource to which the -// edge security policy should be set. The name should conform to -// RFC1035. -// - project: Project ID for this request. +// - backendService: Name of the BackendService resource to which the +// edge security policy should be set. The name should conform to +// RFC1035. +// - project: Project ID for this request. func (r *BackendServicesService) SetEdgeSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetEdgeSecurityPolicyCall { c := &BackendServicesSetEdgeSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -62203,17 +65597,17 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -62270,6 +65664,162 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOpti } +// method id "compute.backendServices.setIamPolicy": + +type BackendServicesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *BackendServicesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *BackendServicesSetIamPolicyCall { + c := &BackendServicesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesSetIamPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesSetIamPolicyCall) Context(ctx context.Context) *BackendServicesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
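// A minimal sketch for the new compute.backendServices.setIamPolicy method added in
// this diff; assumes svc (*compute.Service) and ctx as in the earlier sketches, with
// placeholder project, backend service, and member names. The read-modify-write
// pattern keeps the policy etag intact so concurrent policy edits are rejected.
func grantBackendServiceViewer(ctx context.Context, svc *compute.Service) error {
	policy, err := svc.BackendServices.GetIamPolicy("my-project", "my-backend-service").Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &compute.Binding{
		Role:    "roles/compute.viewer",
		Members: []string{"user:alice@example.com"},
	})
	_, err = svc.BackendServices.SetIamPolicy("my-project", "my-backend-service",
		&compute.GlobalSetPolicyRequest{Policy: policy}).Context(ctx).Do()
	return err
}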
+func (c *BackendServicesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BackendServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "flatPath": "projects/{project}/global/backendServices/{resource}/setIamPolicy", + // "httpMethod": "POST", + // "id": "compute.backendServices.setIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{resource}/setIamPolicy", + // "request": { + // "$ref": "GlobalSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.backendServices.setSecurityPolicy": type BackendServicesSetSecurityPolicyCall struct { @@ -62286,9 +65836,9 @@ type BackendServicesSetSecurityPolicyCall struct { // the specified backend service. For more information, see Google Cloud // Armor Overview // -// - backendService: Name of the BackendService resource to which the -// security policy should be set. The name should conform to RFC1035. -// - project: Project ID for this request. +// - backendService: Name of the BackendService resource to which the +// security policy should be set. The name should conform to RFC1035. +// - project: Project ID for this request. 
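The diff here adds a generated compute.backendServices.setIamPolicy call (and routes its errors through gensupport.WrapError). A minimal usage sketch, illustrative only and not part of the vendored file, assuming Application Default Credentials are available; the project ID, backend service name, role, and member are placeholder values:

    package main

    import (
        "context"
        "fmt"
        "log"

        compute "google.golang.org/api/compute/v1"
    )

    func main() {
        ctx := context.Background()
        svc, err := compute.NewService(ctx) // picks up Application Default Credentials
        if err != nil {
            log.Fatal(err)
        }
        req := &compute.GlobalSetPolicyRequest{
            Policy: &compute.Policy{
                Bindings: []*compute.Binding{{
                    Role:    "roles/compute.loadBalancerAdmin",  // placeholder role
                    Members: []string{"user:alice@example.com"}, // placeholder member
                }},
            },
        }
        // Replaces any existing IAM policy on the backend service, per the
        // generated doc comment for SetIamPolicy.
        policy, err := svc.BackendServices.SetIamPolicy("my-project", "my-backend-service", req).Context(ctx).Do()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("new policy etag:", policy.Etag)
    }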
func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -62381,17 +65931,17 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -62558,17 +66108,17 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -62816,17 +66366,17 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -63027,17 +66577,17 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskType{ ServerResponse: googleapi.ServerResponse{ @@ -63278,17 +66828,17 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskTypeList{ ServerResponse: googleapi.ServerResponse{ @@ -63502,17 +67052,17 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -63768,17 +67318,17 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) 
(*DiskAggrega if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -63998,17 +67548,17 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -64187,17 +67737,17 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -64363,17 +67913,17 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Disk{ ServerResponse: googleapi.ServerResponse{ @@ -64543,17 +68093,17 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -64738,17 +68288,17 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -64993,17 +68543,17 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskList{ ServerResponse: 
googleapi.ServerResponse{ @@ -65215,17 +68765,17 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -65404,17 +68954,17 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -65577,17 +69127,17 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -65761,17 +69311,17 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -65934,17 +69484,17 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -66107,17 +69657,17 @@ func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -66272,17 +69822,17 @@ func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*External if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := 
googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ExternalVpnGateway{ ServerResponse: googleapi.ServerResponse{ @@ -66438,17 +69988,17 @@ func (c *ExternalVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -66676,17 +70226,17 @@ func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*Externa if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ExternalVpnGatewayList{ ServerResponse: googleapi.ServerResponse{ @@ -66871,17 +70421,17 @@ func (c *ExternalVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -67027,17 +70577,17 @@ func (c *ExternalVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -67206,17 +70756,17 @@ func (c *FirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -67375,17 +70925,17 @@ func (c *FirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -67539,17 +71089,17 @@ func (c *FirewallPoliciesCloneRulesCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, 
gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -67698,17 +71248,17 @@ func (c *FirewallPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -67850,17 +71400,17 @@ func (c *FirewallPoliciesGetCall) Do(opts ...googleapi.CallOption) (*FirewallPol if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -67916,8 +71466,8 @@ type FirewallPoliciesGetAssociationCall struct { // GetAssociation: Gets an association with the specified name. // -// - firewallPolicy: Name of the firewall policy to which the queried -// rule belongs. +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. func (r *FirewallPoliciesService) GetAssociation(firewallPolicy string) *FirewallPoliciesGetAssociationCall { c := &FirewallPoliciesGetAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -68006,17 +71556,17 @@ func (c *FirewallPoliciesGetAssociationCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyAssociation{ ServerResponse: googleapi.ServerResponse{ @@ -68167,17 +71717,17 @@ func (c *FirewallPoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -68239,8 +71789,8 @@ type FirewallPoliciesGetRuleCall struct { // GetRule: Gets a rule of the specified priority. // -// - firewallPolicy: Name of the firewall policy to which the queried -// rule belongs. +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. 
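Each regenerated Do method in these hunks now returns its error through gensupport.WrapError instead of the bare *googleapi.Error. On a hedged reading of the upstream change (worth verifying against the google-api-go-client release notes), WrapError attempts to wrap the error as a gax-go *apierror.APIError while keeping the original error in the chain, so existing errors.As checks for *googleapi.Error continue to match and richer structured details become available. A small inspection helper sketched under that assumption:

    package example

    import (
        "errors"
        "log"

        "github.com/googleapis/gax-go/v2/apierror"
        "google.golang.org/api/googleapi"
    )

    // describeComputeError logs whatever detail is available on an error
    // returned by a generated compute/v1 Do method.
    func describeComputeError(err error) {
        if err == nil {
            return
        }
        var gerr *googleapi.Error
        if errors.As(err, &gerr) { // still matches after the WrapError change
            log.Printf("compute API error %d: %s", gerr.Code, gerr.Message)
        }
        var aerr *apierror.APIError
        if errors.As(err, &aerr) { // extra structured detail, when the wrapping applied
            log.Printf("error details: %+v", aerr.Details())
        }
    }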
func (r *FirewallPoliciesService) GetRule(firewallPolicy string) *FirewallPoliciesGetRuleCall { c := &FirewallPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -68329,17 +71879,17 @@ func (c *FirewallPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Firewal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyRule{ ServerResponse: googleapi.ServerResponse{ @@ -68495,17 +72045,17 @@ func (c *FirewallPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -68728,17 +72278,17 @@ func (c *FirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyList{ ServerResponse: googleapi.ServerResponse{ @@ -68924,17 +72474,17 @@ func (c *FirewallPoliciesListAssociationsCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPoliciesListAssociationsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -69076,17 +72626,17 @@ func (c *FirewallPoliciesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -69243,17 +72793,17 @@ func (c *FirewallPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -69414,17 +72964,17 @@ func (c *FirewallPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - 
return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -69585,17 +73135,17 @@ func (c *FirewallPoliciesRemoveAssociationCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -69751,17 +73301,17 @@ func (c *FirewallPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -69903,17 +73453,17 @@ func (c *FirewallPoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -70047,17 +73597,17 @@ func (c *FirewallPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -70204,17 +73754,17 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -70368,17 +73918,17 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Firewall{ ServerResponse: googleapi.ServerResponse{ @@ -70534,17 +74084,17 @@ 
func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -70772,17 +74322,17 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallList{ ServerResponse: googleapi.ServerResponse{ @@ -70984,17 +74534,17 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -71163,17 +74713,17 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -71421,17 +74971,17 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ForwardingRuleAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -71633,17 +75183,17 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -71809,17 +75359,17 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return 
nil, err + return nil, gensupport.WrapError(err) } ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ @@ -71987,17 +75537,17 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -72237,17 +75787,17 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ @@ -72462,17 +76012,17 @@ func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -72651,17 +76201,17 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -72743,10 +76293,10 @@ type ForwardingRulesSetTargetCall struct { // SetTarget: Changes target URL for forwarding rule. The new target // should be of the same type as the old target. // -// - forwardingRule: Name of the ForwardingRule resource in which target -// is to be set. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - forwardingRule: Name of the ForwardingRule resource in which target +// is to be set. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -72841,17 +76391,17 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -73018,17 +76568,17 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -73183,17 +76733,17 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Address{ ServerResponse: googleapi.ServerResponse{ @@ -73349,17 +76899,17 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -73586,17 +77136,17 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AddressList{ ServerResponse: googleapi.ServerResponse{ @@ -73688,6 +77238,162 @@ func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList } } +// method id "compute.globalAddresses.setLabels": + +type GlobalAddressesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a GlobalAddress. To learn more about +// labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. 
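The surrounding hunk introduces a generated compute.globalAddresses.setLabels call. A minimal sketch of labeling a global address, illustrative only, assuming an authenticated *compute.Service named svc and a ctx as in the earlier SetIamPolicy sketch; the project, address name, and labels are placeholders, and the current label fingerprint is fetched first because Compute Engine uses it for optimistic locking:

    // svc is an authenticated *compute.Service; ctx is a context.Context.
    addr, err := svc.GlobalAddresses.Get("my-project", "my-global-address").Context(ctx).Do()
    if err != nil {
        log.Fatal(err)
    }
    op, err := svc.GlobalAddresses.SetLabels("my-project", "my-global-address", &compute.GlobalSetLabelsRequest{
        LabelFingerprint: addr.LabelFingerprint,                               // must match the current fingerprint
        Labels:           map[string]string{"env": "prod", "team": "storage"}, // placeholder labels
    }).Context(ctx).Do()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("setLabels operation %s is %s", op.Name, op.Status)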
+func (r *GlobalAddressesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalAddressesSetLabelsCall { + c := &GlobalAddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddressesSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalAddressesSetLabelsCall) Context(ctx context.Context) *GlobalAddressesSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalAddressesSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.globalAddresses.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.globalAddresses.setLabels", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/addresses/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.globalForwardingRules.delete": type GlobalForwardingRulesDeleteCall struct { @@ -73789,17 +77495,17 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -73954,17 +77660,17 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ @@ -74120,17 +77826,17 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ 
ServerResponse: googleapi.ServerResponse{ @@ -74358,17 +78064,17 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ @@ -74571,17 +78277,17 @@ func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -74732,17 +78438,17 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -74810,9 +78516,9 @@ type GlobalForwardingRulesSetTargetCall struct { // SetTarget: Changes target URL for the GlobalForwardingRule resource. // The new target should be of the same type as the old target. // -// - forwardingRule: Name of the ForwardingRule resource in which target -// is to be set. -// - project: Project ID for this request. +// - forwardingRule: Name of the ForwardingRule resource in which target +// is to be set. +// - project: Project ID for this request. func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -74905,17 +78611,17 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -74988,10 +78694,10 @@ type GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall struct { // AttachNetworkEndpoints: Attach a network endpoint to the specified // network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group where -// you are attaching network endpoints to. It should comply with -// RFC1035. -// - project: Project ID for this request. +// - networkEndpointGroup: The name of the network endpoint group where +// you are attaching network endpoints to. It should comply with +// RFC1035. +// - project: Project ID for this request. 
func (r *GlobalNetworkEndpointGroupsService) AttachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsattachendpointsrequest *GlobalNetworkEndpointGroupsAttachEndpointsRequest) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { c := &GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75084,17 +78790,17 @@ func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googl if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -75165,9 +78871,9 @@ type GlobalNetworkEndpointGroupsDeleteCall struct { // Delete: Deletes the specified network endpoint group.Note that the // NEG cannot be deleted if there are backend services referencing it. // -// - networkEndpointGroup: The name of the network endpoint group to -// delete. It should comply with RFC1035. -// - project: Project ID for this request. +// - networkEndpointGroup: The name of the network endpoint group to +// delete. It should comply with RFC1035. +// - project: Project ID for this request. func (r *GlobalNetworkEndpointGroupsService) Delete(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsDeleteCall { c := &GlobalNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75254,17 +78960,17 @@ func (c *GlobalNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -75333,9 +79039,9 @@ type GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall struct { // DetachNetworkEndpoints: Detach the network endpoint from the // specified network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group where -// you are removing network endpoints. It should comply with RFC1035. -// - project: Project ID for this request. +// - networkEndpointGroup: The name of the network endpoint group where +// you are removing network endpoints. It should comply with RFC1035. +// - project: Project ID for this request. 
func (r *GlobalNetworkEndpointGroupsService) DetachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsdetachendpointsrequest *GlobalNetworkEndpointGroupsDetachEndpointsRequest) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { c := &GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75428,17 +79134,17 @@ func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googl if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -75510,9 +79216,9 @@ type GlobalNetworkEndpointGroupsGetCall struct { // Get: Returns the specified network endpoint group. Gets a list of // available network endpoint groups by making a list() request. // -// - networkEndpointGroup: The name of the network endpoint group. It -// should comply with RFC1035. -// - project: Project ID for this request. +// - networkEndpointGroup: The name of the network endpoint group. It +// should comply with RFC1035. +// - project: Project ID for this request. func (r *GlobalNetworkEndpointGroupsService) Get(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsGetCall { c := &GlobalNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75596,17 +79302,17 @@ func (c *GlobalNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ @@ -75761,17 +79467,17 @@ func (c *GlobalNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -75999,17 +79705,17 @@ func (c *GlobalNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ @@ -76115,10 +79821,10 @@ type GlobalNetworkEndpointGroupsListNetworkEndpointsCall struct { // ListNetworkEndpoints: Lists the network endpoints in the specified // network endpoint group. 
// -// - networkEndpointGroup: The name of the network endpoint group from -// which you want to generate a list of included network endpoints. It -// should comply with RFC1035. -// - project: Project ID for this request. +// - networkEndpointGroup: The name of the network endpoint group from +// which you want to generate a list of included network endpoints. It +// should comply with RFC1035. +// - project: Project ID for this request. func (r *GlobalNetworkEndpointGroupsService) ListNetworkEndpoints(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { c := &GlobalNetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76273,17 +79979,17 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googlea if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroupsListNetworkEndpoints{ ServerResponse: googleapi.ServerResponse{ @@ -76572,17 +80278,17 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OperationAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -76759,7 +80465,7 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -76895,17 +80601,17 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -77134,17 +80840,17 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ @@ -77332,17 +81038,17 @@ func (c *GlobalOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err 
+ return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -77476,7 +81182,7 @@ func (c *GlobalOrganizationOperationsDeleteCall) Do(opts ...googleapi.CallOption } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -77613,17 +81319,17 @@ func (c *GlobalOrganizationOperationsGetCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -77849,17 +81555,17 @@ func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ @@ -77959,9 +81665,9 @@ type GlobalPublicDelegatedPrefixesDeleteCall struct { // Delete: Deletes the specified global PublicDelegatedPrefix. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to delete. +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to delete. func (r *GlobalPublicDelegatedPrefixesService) Delete(project string, publicDelegatedPrefix string) *GlobalPublicDelegatedPrefixesDeleteCall { c := &GlobalPublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -78048,17 +81754,17 @@ func (c *GlobalPublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -78127,9 +81833,9 @@ type GlobalPublicDelegatedPrefixesGetCall struct { // Get: Returns the specified global PublicDelegatedPrefix resource. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to return. +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to return. 
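Most of the wrapped mutation calls in these hunks return a *compute.Operation rather than the final resource, and the globalOperations.wait method shown above can block on it. A short polling sketch, illustrative only and based on my reading of the generated wait semantics (the server returns once the operation is DONE or a default deadline of roughly two minutes passes); svc, ctx, the project ID, and op are assumed from the earlier sketches:

    // op is a *compute.Operation returned by a global mutation such as
    // svc.GlobalAddresses.SetLabels(...).Do().
    for op.Status != "DONE" {
        var err error
        op, err = svc.GlobalOperations.Wait("my-project", op.Name).Context(ctx).Do()
        if err != nil {
            log.Fatal(err)
        }
    }
    if op.Error != nil {
        log.Fatalf("operation failed: %+v", op.Error.Errors)
    }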
func (r *GlobalPublicDelegatedPrefixesService) Get(project string, publicDelegatedPrefix string) *GlobalPublicDelegatedPrefixesGetCall { c := &GlobalPublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -78213,17 +81919,17 @@ func (c *GlobalPublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicDelegatedPrefix{ ServerResponse: googleapi.ServerResponse{ @@ -78379,17 +82085,17 @@ func (c *GlobalPublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -78616,17 +82322,17 @@ func (c *GlobalPublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicDelegatedPrefixList{ ServerResponse: googleapi.ServerResponse{ @@ -78734,9 +82440,9 @@ type GlobalPublicDelegatedPrefixesPatchCall struct { // with the data included in the request. This method supports PATCH // semantics and uses JSON merge patch format and processing rules. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to patch. +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to patch. 
func (r *GlobalPublicDelegatedPrefixesService) Patch(project string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *GlobalPublicDelegatedPrefixesPatchCall { c := &GlobalPublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -78829,17 +82535,17 @@ func (c *GlobalPublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -79088,17 +82794,17 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthChecksAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -79296,17 +83002,17 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -79461,17 +83167,17 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ @@ -79627,17 +83333,17 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -79865,17 +83571,17 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ @@ -80077,17 +83783,17 @@ func (c *HealthChecksPatchCall) Do(opts 
...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -80254,17 +83960,17 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -80423,17 +84129,17 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -80588,17 +84294,17 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HttpHealthCheck{ ServerResponse: googleapi.ServerResponse{ @@ -80754,17 +84460,17 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -80992,17 +84698,17 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HttpHealthCheckList{ ServerResponse: googleapi.ServerResponse{ @@ -81204,17 +84910,17 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) 
} ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -81381,17 +85087,17 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -81550,17 +85256,17 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -81715,17 +85421,17 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HttpsHealthCheck{ ServerResponse: googleapi.ServerResponse{ @@ -81881,17 +85587,17 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -82119,17 +85825,17 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HttpsHealthCheckList{ ServerResponse: googleapi.ServerResponse{ @@ -82331,17 +86037,17 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -82508,17 +86214,17 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer 
googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -82680,17 +86386,17 @@ func (c *ImageFamilyViewsGetCall) Do(opts ...googleapi.CallOption) (*ImageFamily if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ImageFamilyView{ ServerResponse: googleapi.ServerResponse{ @@ -82850,17 +86556,17 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -83024,17 +86730,17 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -83192,17 +86898,17 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Image{ ServerResponse: googleapi.ServerResponse{ @@ -83353,17 +87059,17 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Image{ ServerResponse: googleapi.ServerResponse{ @@ -83521,17 +87227,17 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -83700,17 +87406,17 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, 
gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -83951,17 +87657,17 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ImageList{ ServerResponse: googleapi.ServerResponse{ @@ -84163,17 +87869,17 @@ func (c *ImagesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -84324,17 +88030,17 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -84480,17 +88186,17 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -84636,17 +88342,17 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -84727,10 +88433,10 @@ type InstanceGroupManagersAbandonInstancesCall struct { // elapsed before the VM instance is removed or deleted. You can specify // a maximum of 1000 instances with this method per request. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. 
+// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -84825,17 +88531,17 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -85091,17 +88797,17 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManagerAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -85215,11 +88921,11 @@ type InstanceGroupManagersApplyUpdatesToInstancesCall struct { // managed instance group. This method can be used to apply new // overrides and/or new versions. // -// - instanceGroupManager: The name of the managed instance group, -// should conform to RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. Should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group, +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. Should conform to RFC1035. func (r *InstanceGroupManagersService) ApplyUpdatesToInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest) *InstanceGroupManagersApplyUpdatesToInstancesCall { c := &InstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85298,17 +89004,17 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi. if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -85387,11 +89093,11 @@ type InstanceGroupManagersCreateInstancesCall struct { // actions take additional time. You must separately verify the status // of the creating or actions with the listmanagedinstances method. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. 
-// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) CreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagerscreateinstancesrequest *InstanceGroupManagersCreateInstancesRequest) *InstanceGroupManagersCreateInstancesCall { c := &InstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85485,17 +89191,17 @@ func (c *InstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -85576,11 +89282,11 @@ type InstanceGroupManagersDeleteCall struct { // to a backend service. Read Deleting an instance group for more // information. // -// - instanceGroupManager: The name of the managed instance group to -// delete. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group to +// delete. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85669,17 +89375,17 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -85766,10 +89472,10 @@ type InstanceGroupManagersDeleteInstancesCall struct { // VM instance is removed or deleted. You can specify a maximum of 1000 // instances with this method per request. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85864,17 +89570,17 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -85954,11 +89660,11 @@ type InstanceGroupManagersDeletePerInstanceConfigsCall struct { // DeletePerInstanceConfigs: Deletes selected per-instance // configurations for the managed instance group. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) DeletePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq) *InstanceGroupManagersDeletePerInstanceConfigsCall { c := &InstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86037,17 +89743,17 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -86123,10 +89829,10 @@ type InstanceGroupManagersGetCall struct { // group. Gets a list of available managed instance groups by making a // list() request. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86212,17 +89918,17 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ @@ -86301,9 +90007,9 @@ type InstanceGroupManagersInsertCall struct { // 1000 VM instances per group. Please contact Cloud Support if you need // an increase in this limit. // -// - project: Project ID for this request. -// - zone: The name of the zone where you want to create the managed -// instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the managed +// instance group. func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86396,17 +90102,17 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -86478,9 +90184,9 @@ type InstanceGroupManagersListCall struct { // List: Retrieves a list of managed instance groups that are contained // within the specified project and zone. // -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86646,17 +90352,17 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ @@ -86772,13 +90478,13 @@ type InstanceGroupManagersListErrorsCall struct { // given managed instance group. The filter and orderBy query parameters // are not supported. // -// - instanceGroupManager: The name of the managed instance group. 
It -// must be a string that meets the requirements in RFC1035, or an -// unsigned long integer: must match regexp pattern: (?:a-z -// (?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// must be a string that meets the requirements in RFC1035, or an +// unsigned long integer: must match regexp pattern: (?:a-z +// (?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) ListErrors(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListErrorsCall { c := &InstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86947,17 +90653,17 @@ func (c *InstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManagersListErrorsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -87081,12 +90787,14 @@ type InstanceGroupManagersListManagedInstancesCall struct { // the instance. For example, if the group is still creating an // instance, the currentAction is CREATING. If a previous action failed, // the list displays the errors for that failed action. The orderBy -// query parameter is not supported. -// -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// query parameter is not supported. The `pageToken` query parameter is +// supported only in the alpha and beta API and only if the group's +// `listManagedInstancesResults` field is set to `PAGINATED`. +// +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87243,17 +90951,17 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManagersListManagedInstancesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -87267,7 +90975,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. 
Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported.", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.listManagedInstances", @@ -87375,11 +91083,11 @@ type InstanceGroupManagersListPerInstanceConfigsCall struct { // defined for the managed instance group. The orderBy query parameter // is not supported. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListPerInstanceConfigsCall { c := &InstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87525,7 +91233,9 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) // error will be non-nil. Any non-2xx status code is an error. Response // headers are in either // *InstanceGroupManagersListPerInstanceConfigsResp.ServerResponse.Header -// or (if a response was returned at all) in +// +// or (if a response was returned at all) in +// // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was // returned. @@ -87536,17 +91246,17 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManagersListPerInstanceConfigsResp{ ServerResponse: googleapi.ServerResponse{ @@ -87677,10 +91387,10 @@ type InstanceGroupManagersPatchCall struct { // state of that VM. To learn how to apply an updated configuration to // the VMs in a MIG, see Updating instances in a MIG. // -// - instanceGroupManager: The name of the instance group manager. -// - project: Project ID for this request. 
-// - zone: The name of the zone where you want to create the managed -// instance group. +// - instanceGroupManager: The name of the instance group manager. +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the managed +// instance group. func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87775,17 +91485,17 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -87867,11 +91577,11 @@ type InstanceGroupManagersPatchPerInstanceConfigsCall struct { // serves as a key used to distinguish whether to perform insert or // patch. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) PatchPerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagerspatchperinstanceconfigsreq *InstanceGroupManagersPatchPerInstanceConfigsReq) *InstanceGroupManagersPatchPerInstanceConfigsCall { c := &InstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87966,17 +91676,17 @@ func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi. if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -88065,10 +91775,10 @@ type InstanceGroupManagersRecreateInstancesCall struct { // before the VM instance is removed or deleted. You can specify a // maximum of 1000 instances with this method per request. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88163,17 +91873,17 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -88266,14 +91976,14 @@ type InstanceGroupManagersResizeCall struct { // seconds after the connection draining duration has elapsed before the // VM instance is removed or deleted. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - size: The number of running instances that the managed instance -// group should maintain at any given time. The group automatically -// adds or removes instances to maintain the number of instances -// specified by this parameter. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - size: The number of running instances that the managed instance +// group should maintain at any given time. The group automatically +// adds or removes instances to maintain the number of instances +// specified by this parameter. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88363,17 +92073,17 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -88461,10 +92171,10 @@ type InstanceGroupManagersSetInstanceTemplateCall struct { // recreateInstances, run applyUpdatesToInstances, or set the group's // updatePolicy.type to PROACTIVE. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88559,17 +92269,17 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -88654,10 +92364,10 @@ type InstanceGroupManagersSetTargetPoolsCall struct { // change might take some time to apply to all of the instances in the // group depending on the size of the group. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88752,17 +92462,17 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -88844,11 +92554,11 @@ type InstanceGroupManagersUpdatePerInstanceConfigsCall struct { // serves as a key used to distinguish whether to perform insert or // patch. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. 
func (r *InstanceGroupManagersService) UpdatePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq) *InstanceGroupManagersUpdatePerInstanceConfigsCall { c := &InstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88943,17 +92653,17 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -89034,10 +92744,10 @@ type InstanceGroupsAddInstancesCall struct { // group. All of the instances in the instance group must be in the same // network/subnetwork. Read Adding instances for more information. // -// - instanceGroup: The name of the instance group where you are adding -// instances. -// - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. +// - instanceGroup: The name of the instance group where you are adding +// instances. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89132,17 +92842,17 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -89397,17 +93107,17 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -89612,17 +93322,17 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -89789,17 +93499,17 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup if res.Body 
!= nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ @@ -89871,9 +93581,9 @@ type InstanceGroupsInsertCall struct { // Insert: Creates an instance group in the specified project using the // parameters that are included in the request. // -// - project: Project ID for this request. -// - zone: The name of the zone where you want to create the instance -// group. +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the instance +// group. func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89966,17 +93676,17 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -90216,17 +93926,17 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupList{ ServerResponse: googleapi.ServerResponse{ @@ -90343,10 +94053,10 @@ type InstanceGroupsListInstancesCall struct { // parameter is supported, but only for expressions that use `eq` // (equal) or `ne` (not equal) operators. // -// - instanceGroup: The name of the instance group from which you want -// to generate a list of included instances. -// - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. +// - instanceGroup: The name of the instance group from which you want +// to generate a list of included instances. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. 
func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90507,17 +94217,17 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ @@ -90645,10 +94355,10 @@ type InstanceGroupsRemoveInstancesCall struct { // can take up to 60 seconds after the connection draining duration // before the VM instance is removed or deleted. // -// - instanceGroup: The name of the instance group where the specified -// instances will be removed. -// - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. +// - instanceGroup: The name of the instance group where the specified +// instances will be removed. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90743,17 +94453,17 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -90832,10 +94542,10 @@ type InstanceGroupsSetNamedPortsCall struct { // SetNamedPorts: Sets the named ports for the specified instance group. // -// - instanceGroup: The name of the instance group where the named ports -// are updated. -// - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. +// - instanceGroup: The name of the instance group where the named ports +// are updated. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. 
func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90930,17 +94640,17 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -91107,17 +94817,17 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -91272,17 +94982,17 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceTemplate{ ServerResponse: googleapi.ServerResponse{ @@ -91440,17 +95150,17 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -91615,17 +95325,17 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -91853,17 +95563,17 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceTemplateList{ ServerResponse: googleapi.ServerResponse{ @@ -92048,17 +95758,17 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts 
...googleapi.CallOption) (*P if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -92204,17 +95914,17 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -92284,11 +95994,11 @@ type InstancesAddAccessConfigCall struct { // AddAccessConfig: Adds an access config to an instance's network // interface. // -// - instance: The instance name for this request. -// - networkInterface: The name of the network interface to add to this -// instance. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - instance: The instance name for this request. +// - networkInterface: The name of the network interface to add to this +// instance. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92384,17 +96094,17 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -92581,17 +96291,17 @@ func (c *InstancesAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -92850,17 +96560,17 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -93081,17 +96791,17 @@ func (c *InstancesAttachDiskCall) Do(opts 
...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -93175,7 +96885,8 @@ type InstancesBulkInsertCall struct { } // BulkInsert: Creates multiple instances. Count specifies the number of -// instances to create. +// instances to create. For more information, see About bulk creation of +// VMs. // // - project: Project ID for this request. // - zone: The name of the zone for this request. @@ -93271,17 +96982,17 @@ func (c *InstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -93295,7 +97006,7 @@ func (c *InstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates multiple instances. Count specifies the number of instances to create.", + // "description": "Creates multiple instances. Count specifies the number of instances to create. For more information, see About bulk creation of VMs.", // "flatPath": "projects/{project}/zones/{zone}/instances/bulkInsert", // "httpMethod": "POST", // "id": "compute.instances.bulkInsert", @@ -93445,17 +97156,17 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -93628,17 +97339,17 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -93729,12 +97440,12 @@ type InstancesDetachDiskCall struct { // DetachDisk: Detaches a disk from an instance. // -// - deviceName: The device name of the disk to detach. Make a get() -// request on the instance to view currently attached disks and device -// names. -// - instance: Instance name for this request. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - deviceName: The device name of the disk to detach. Make a get() +// request on the instance to view currently attached disks and device +// names. +// - instance: Instance name for this request. +// - project: Project ID for this request. 
+// - zone: The name of the zone for this request. func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93824,17 +97535,17 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -94008,17 +97719,17 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Instance{ ServerResponse: googleapi.ServerResponse{ @@ -94093,11 +97804,11 @@ type InstancesGetEffectiveFirewallsCall struct { // GetEffectiveFirewalls: Returns effective firewalls applied to an // interface of the instance. // -// - instance: Name of the instance scoping this request. -// - networkInterface: The name of the network interface to get the -// effective firewalls. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - instance: Name of the instance scoping this request. +// - networkInterface: The name of the network interface to get the +// effective firewalls. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
func (r *InstancesService) GetEffectiveFirewalls(project string, zone string, instance string, networkInterface string) *InstancesGetEffectiveFirewallsCall { c := &InstancesGetEffectiveFirewallsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94185,17 +97896,17 @@ func (c *InstancesGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstancesGetEffectiveFirewallsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -94378,17 +98089,17 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &GuestAttributes{ ServerResponse: googleapi.ServerResponse{ @@ -94568,17 +98279,17 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -94746,17 +98457,17 @@ func (c *InstancesGetScreenshotCall) Do(opts ...googleapi.CallOption) (*Screensh if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Screenshot{ ServerResponse: googleapi.ServerResponse{ @@ -94944,17 +98655,17 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SerialPortOutput{ ServerResponse: googleapi.ServerResponse{ @@ -95132,17 +98843,17 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ShieldedInstanceIdentity{ ServerResponse: googleapi.ServerResponse{ @@ -95336,17 +99047,17 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) 
(*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -95596,17 +99307,17 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceList{ ServerResponse: googleapi.ServerResponse{ @@ -95725,10 +99436,10 @@ type InstancesListReferrersCall struct { // includes the instance group. For more information, read Viewing // referrers to VM instances. // -// - instance: Name of the target instance scoping this request, or '-' -// if the request should span over all instances in the container. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - instance: Name of the target instance scoping this request, or '-' +// if the request should span over all instances in the container. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95896,17 +99607,17 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceListReferrers{ ServerResponse: googleapi.ServerResponse{ @@ -96126,17 +99837,17 @@ func (c *InstancesRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -96309,17 +100020,17 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -96488,17 +100199,17 @@ func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - 
return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -96645,7 +100356,7 @@ func (c *InstancesSendDiagnosticInterruptCall) Do(opts ...googleapi.CallOption) } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -96802,17 +100513,17 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -96896,14 +100607,14 @@ type InstancesSetDiskAutoDeleteCall struct { // SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to // an instance. // -// - autoDelete: Whether to auto-delete the disk when the instance is -// deleted. -// - deviceName: The device name of the disk to modify. Make a get() -// request on the instance to view currently attached disks and device -// names. -// - instance: The instance name for this request. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - autoDelete: Whether to auto-delete the disk when the instance is +// deleted. +// - deviceName: The device name of the disk to modify. Make a get() +// request on the instance to view currently attached disks and device +// names. +// - instance: The instance name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96994,17 +100705,17 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -97179,17 +100890,17 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -97363,17 +101074,17 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -97552,17 +101263,17 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -97741,17 +101452,17 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -97930,17 +101641,17 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -98121,17 +101832,17 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { 
res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -98314,17 +102025,17 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -98504,17 +102215,17 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -98695,17 +102406,17 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -98884,17 +102595,17 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -99050,17 +102761,17 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -99224,17 +102935,17 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ 
-99411,17 +103122,17 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -99517,6 +103228,14 @@ func (r *InstancesService) Stop(project string, zone string, instance string) *I return c } +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If +// true, discard the contents of any attached localSSD partitions. +// Default value is false. +func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { + c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) + return c +} + // RequestId sets the optional parameter "requestId": An optional // request ID to identify requests. Specify a unique request ID so that // if you must retry your request, the server will know to ignore the @@ -99597,17 +103316,17 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -99631,6 +103350,11 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "instance" // ], // "parameters": { + // "discardLocalSsd": { + // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "location": "query", + // "type": "boolean" + // }, // "instance": { // "description": "Name of the instance resource to stop.", // "location": "path", @@ -99702,6 +103426,14 @@ func (r *InstancesService) Suspend(project string, zone string, instance string) return c } +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If +// true, discard the contents of any attached localSSD partitions. +// Default value is false. +func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { + c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) + return c +} + // RequestId sets the optional parameter "requestId": An optional // request ID to identify requests. 
Specify a unique request ID so that // if you must retry your request, the server will know to ignore the @@ -99782,17 +103514,17 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -99816,6 +103548,11 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err // "instance" // ], // "parameters": { + // "discardLocalSsd": { + // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "location": "query", + // "type": "boolean" + // }, // "instance": { // "description": "Name of the instance resource to suspend.", // "location": "path", @@ -99952,17 +103689,17 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -100060,10 +103797,11 @@ func (r *InstancesService) Update(project string, zone string, instance string, // acts based on the minimum action that the updated properties require. // // Possible values: -// "INVALID" -// "NO_EFFECT" - No changes can be made to the instance. -// "REFRESH" - The instance will not restart. -// "RESTART" - The instance will restart. +// +// "INVALID" +// "NO_EFFECT" - No changes can be made to the instance. +// "REFRESH" - The instance will not restart. +// "RESTART" - The instance will restart. func (c *InstancesUpdateCall) MinimalAction(minimalAction string) *InstancesUpdateCall { c.urlParams_.Set("minimalAction", minimalAction) return c @@ -100077,10 +103815,11 @@ func (c *InstancesUpdateCall) MinimalAction(minimalAction string) *InstancesUpda // lowest to highest are NO_EFFECT, REFRESH, and RESTART. // // Possible values: -// "INVALID" -// "NO_EFFECT" - No changes can be made to the instance. -// "REFRESH" - The instance will not restart. -// "RESTART" - The instance will restart. +// +// "INVALID" +// "NO_EFFECT" - No changes can be made to the instance. +// "REFRESH" - The instance will not restart. +// "RESTART" - The instance will restart. 
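The DiscardLocalSsd option added above to InstancesStopCall and InstancesSuspendCall surfaces the new discardLocalSsd query parameter. A minimal usage sketch, reusing the imports from the error-handling example earlier in this file; svc is an initialized *compute.Service, the identifiers are placeholders, and the returned Operation is handed back rather than polled:

	// Stop the VM and discard the contents of any attached local SSD
	// partitions, i.e. send discardLocalSsd=true with the stop request.
	func stopAndDiscardLocalSsd(ctx context.Context, svc *compute.Service, project, zone, instance string) (*compute.Operation, error) {
		return svc.Instances.Stop(project, zone, instance).
			DiscardLocalSsd(true).
			Context(ctx).
			Do()
	}

InstancesSuspendCall gains the identical builder method, so the suspend path reads the same with Instances.Suspend in place of Instances.Stop.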
func (c *InstancesUpdateCall) MostDisruptiveAllowedAction(mostDisruptiveAllowedAction string) *InstancesUpdateCall { c.urlParams_.Set("mostDisruptiveAllowedAction", mostDisruptiveAllowedAction) return c @@ -100171,17 +103910,17 @@ func (c *InstancesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -100299,11 +104038,11 @@ type InstancesUpdateAccessConfigCall struct { // This method supports PATCH semantics and uses the JSON merge patch // format and processing rules. // -// - instance: The instance name for this request. -// - networkInterface: The name of the network interface where the -// access config is attached. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - instance: The instance name for this request. +// - networkInterface: The name of the network interface where the +// access config is attached. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -100399,17 +104138,17 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -100597,17 +104336,17 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -100792,17 +104531,17 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -100990,17 +104729,17 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if 
err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -101258,17 +104997,17 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -101379,10 +105118,10 @@ type InterconnectAttachmentsDeleteCall struct { // Delete: Deletes the specified interconnect attachment. // -// - interconnectAttachment: Name of the interconnect attachment to -// delete. -// - project: Project ID for this request. -// - region: Name of the region for this request. +// - interconnectAttachment: Name of the interconnect attachment to +// delete. +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -101471,17 +105210,17 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -101559,10 +105298,10 @@ type InterconnectAttachmentsGetCall struct { // Get: Returns the specified interconnect attachment. // -// - interconnectAttachment: Name of the interconnect attachment to -// return. -// - project: Project ID for this request. -// - region: Name of the region for this request. +// - interconnectAttachment: Name of the interconnect attachment to +// return. +// - project: Project ID for this request. +// - region: Name of the region for this request. 
func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -101648,17 +105387,17 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectAttachment{ ServerResponse: googleapi.ServerResponse{ @@ -101833,17 +105572,17 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -102088,17 +105827,17 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectAttachmentList{ ServerResponse: googleapi.ServerResponse{ @@ -102215,10 +105954,10 @@ type InterconnectAttachmentsPatchCall struct { // included in the request. This method supports PATCH semantics and // uses the JSON merge patch format and processing rules. // -// - interconnectAttachment: Name of the interconnect attachment to -// patch. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - interconnectAttachment: Name of the interconnect attachment to +// patch. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -102313,17 +106052,17 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -102389,6 +106128,195 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op } +// method id "compute.interconnectAttachments.setLabels": + +type InterconnectAttachmentsSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an InterconnectAttachment. To learn +// more about labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. +func (r *InterconnectAttachmentsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *InterconnectAttachmentsSetLabelsCall { + c := &InterconnectAttachmentsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *InterconnectAttachmentsSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InterconnectAttachmentsSetLabelsCall) Context(ctx context.Context) *InterconnectAttachmentsSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an InterconnectAttachment. 
To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.interconnectAttachments.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.interconnectLocations.get": type InterconnectLocationsGetCall struct { @@ -102490,17 +106418,17 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectLocation{ ServerResponse: googleapi.ServerResponse{ @@ -102729,17 +106657,17 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectLocationList{ ServerResponse: googleapi.ServerResponse{ @@ -102842,7 +106770,7 @@ type InterconnectsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified interconnect. 
+// Delete: Deletes the specified Interconnect. // // - interconnect: Name of the interconnect to delete. // - project: Project ID for this request. @@ -102932,17 +106860,17 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -102956,7 +106884,7 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified interconnect.", + // "description": "Deletes the specified Interconnect.", // "flatPath": "projects/{project}/global/interconnects/{interconnect}", // "httpMethod": "DELETE", // "id": "compute.interconnects.delete", @@ -103009,8 +106937,8 @@ type InterconnectsGetCall struct { header_ http.Header } -// Get: Returns the specified interconnect. Get a list of available -// interconnects by making a list() request. +// Get: Returns the specified Interconnect. Get a list of available +// Interconnects by making a list() request. // // - interconnect: Name of the interconnect to return. // - project: Project ID for this request. @@ -103097,17 +107025,17 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Interconnect{ ServerResponse: googleapi.ServerResponse{ @@ -103121,7 +107049,7 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, } return ret, nil // { - // "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", + // "description": "Returns the specified Interconnect. Get a list of available Interconnects by making a list() request.", // "flatPath": "projects/{project}/global/interconnects/{interconnect}", // "httpMethod": "GET", // "id": "compute.interconnects.get", @@ -103171,7 +107099,7 @@ type InterconnectsGetDiagnosticsCall struct { } // GetDiagnostics: Returns the interconnectDiagnostics for the specified -// interconnect. +// Interconnect. // // - interconnect: Name of the interconnect resource to query. // - project: Project ID for this request. 
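For the new compute.interconnectAttachments.setLabels method added above, a hedged usage sketch follows, again reusing the earlier imports. The Labels and LabelFingerprint fields on RegionSetLabelsRequest, and LabelFingerprint on the fetched attachment, are assumed rather than shown in this diff; the fingerprint is read back first because label updates on Compute resources normally require it for optimistic locking. The global compute.interconnects.setLabels method added further down has the same shape, with a GlobalSetLabelsRequest and no region argument.

	// Replace the labels on an InterconnectAttachment using the SetLabels call
	// introduced in this revision. Field names on the request are assumptions.
	func labelAttachment(ctx context.Context, svc *compute.Service, project, region, attachment string) (*compute.Operation, error) {
		current, err := svc.InterconnectAttachments.Get(project, region, attachment).Context(ctx).Do()
		if err != nil {
			return nil, err
		}
		req := &compute.RegionSetLabelsRequest{
			LabelFingerprint: current.LabelFingerprint,
			Labels: map[string]string{
				"env":  "prod",
				"team": "networking",
			},
		}
		return svc.InterconnectAttachments.SetLabels(project, region, attachment, req).Context(ctx).Do()
	}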
@@ -103259,17 +107187,17 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectsGetDiagnosticsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -103283,7 +107211,7 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Returns the interconnectDiagnostics for the specified interconnect.", + // "description": "Returns the interconnectDiagnostics for the specified Interconnect.", // "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", // "httpMethod": "GET", // "id": "compute.interconnects.getDiagnostics", @@ -103331,7 +107259,7 @@ type InterconnectsInsertCall struct { header_ http.Header } -// Insert: Creates a Interconnect in the specified project using the +// Insert: Creates an Interconnect in the specified project using the // data included in the request. // // - project: Project ID for this request. @@ -103425,17 +107353,17 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -103449,7 +107377,7 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a Interconnect in the specified project using the data included in the request.", + // "description": "Creates an Interconnect in the specified project using the data included in the request.", // "flatPath": "projects/{project}/global/interconnects", // "httpMethod": "POST", // "id": "compute.interconnects.insert", @@ -103496,7 +107424,7 @@ type InterconnectsListCall struct { header_ http.Header } -// List: Retrieves the list of interconnect available to the specified +// List: Retrieves the list of Interconnects available to the specified // project. // // - project: Project ID for this request. 
@@ -103663,17 +107591,17 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectList{ ServerResponse: googleapi.ServerResponse{ @@ -103687,7 +107615,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL } return ret, nil // { - // "description": "Retrieves the list of interconnect available to the specified project.", + // "description": "Retrieves the list of Interconnects available to the specified project.", // "flatPath": "projects/{project}/global/interconnects", // "httpMethod": "GET", // "id": "compute.interconnects.list", @@ -103777,7 +107705,7 @@ type InterconnectsPatchCall struct { header_ http.Header } -// Patch: Updates the specified interconnect with the data included in +// Patch: Updates the specified Interconnect with the data included in // the request. This method supports PATCH semantics and uses the JSON // merge patch format and processing rules. // @@ -103875,17 +107803,17 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -103899,7 +107827,7 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified Interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "flatPath": "projects/{project}/global/interconnects/{interconnect}", // "httpMethod": "PATCH", // "id": "compute.interconnects.patch", @@ -103943,6 +107871,162 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } +// method id "compute.interconnects.setLabels": + +type InterconnectsSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an Interconnect. To learn more about +// labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *InterconnectsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *InterconnectsSetLabelsCall { + c := &InterconnectsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsSetLabelsCall) Context(ctx context.Context) *InterconnectsSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an Interconnect. 
To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.interconnects.setLabels", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/interconnects/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.licenseCodes.get": type LicenseCodesGetCall struct { @@ -103960,9 +108044,9 @@ type LicenseCodesGetCall struct { // *Caution* This resource is intended for use only by third-party // partners who are creating Cloud Marketplace images. // -// - licenseCode: Number corresponding to the License code resource to -// return. -// - project: Project ID for this request. +// - licenseCode: Number corresponding to the License code resource to +// return. +// - project: Project ID for this request. func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -104046,17 +108130,17 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &LicenseCode{ ServerResponse: googleapi.ServerResponse{ @@ -104201,17 +108285,17 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -104368,17 +108452,17 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -104534,17 +108618,17 @@ func (c *LicensesGetCall) Do(opts 
...googleapi.CallOption) (*License, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &License{ ServerResponse: googleapi.ServerResponse{ @@ -104704,17 +108788,17 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -104877,17 +108961,17 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -105124,17 +109208,17 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &LicensesListResponse{ ServerResponse: googleapi.ServerResponse{ @@ -105321,17 +109405,17 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -105478,17 +109562,17 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -105644,17 +109728,17 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, 
gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -105809,17 +109893,17 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &MachineImage{ ServerResponse: googleapi.ServerResponse{ @@ -105977,17 +110061,17 @@ func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -106160,17 +110244,17 @@ func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -106403,17 +110487,17 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &MachineImageList{ ServerResponse: googleapi.ServerResponse{ @@ -106598,17 +110682,17 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -106754,17 +110838,17 @@ func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -107008,17 +111092,17 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err 
!= nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &MachineTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -107219,17 +111303,17 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &MachineType{ ServerResponse: googleapi.ServerResponse{ @@ -107470,17 +111554,17 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &MachineTypeList{ ServerResponse: googleapi.ServerResponse{ @@ -107580,9 +111664,9 @@ func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeLis } } -// method id "compute.networkEdgeSecurityServices.aggregatedList": +// method id "compute.networkAttachments.aggregatedList": -type NetworkEdgeSecurityServicesAggregatedListCall struct { +type NetworkAttachmentsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -107591,12 +111675,12 @@ type NetworkEdgeSecurityServicesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves the list of all NetworkEdgeSecurityService -// resources available to the specified project. +// AggregatedList: Retrieves the list of all NetworkAttachment +// resources, regional and global, available to the specified project. // -// - project: Name of the project scoping this request. -func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *NetworkEdgeSecurityServicesAggregatedListCall { - c := &NetworkEdgeSecurityServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +func (r *NetworkAttachmentsService) AggregatedList(project string) *NetworkAttachmentsAggregatedListCall { + c := &NetworkAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -107636,7 +111720,7 @@ func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *Net // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne // .*instance`. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) Filter(filter string) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -107649,7 +111733,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *N // response. For resource types which predate this field, if this flag // is omitted or false, only scopes of the scope types where the // resource type is expected to be found will be included. 
-func (c *NetworkEdgeSecurityServicesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) return c } @@ -107660,7 +111744,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) IncludeAllScopes(include // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *NetworkEdgeSecurityServicesAggregatedListCall) MaxResults(maxResults int64) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) MaxResults(maxResults int64) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -107674,7 +111758,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) MaxResults(maxResults in // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) OrderBy(orderBy string) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) OrderBy(orderBy string) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -107682,7 +111766,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) OrderBy(orderBy string) // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) PageToken(pageToken string) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) PageToken(pageToken string) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -107691,7 +111775,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) PageToken(pageToken stri // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -107699,7 +111783,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(ret // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107709,7 +111793,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Fields(s ...googleapi.Fi // getting updates only after the object has changed since the last // request. 
Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -107717,21 +111801,21 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) IfNoneMatch(entityTag st // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) Context(ctx context.Context) *NetworkAttachmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) Header() http.Header { +func (c *NetworkAttachmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -107744,7 +111828,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (* var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEdgeSecurityServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkAttachments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -107757,35 +111841,33 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (* return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEdgeSecurityServices.aggregatedList" call. -// Exactly one of *NetworkEdgeSecurityServiceAggregatedList or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *NetworkEdgeSecurityServiceAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityServiceAggregatedList, error) { +// Do executes the "compute.networkAttachments.aggregatedList" call. +// Exactly one of *NetworkAttachmentAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *NetworkAttachmentAggregatedList.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkEdgeSecurityServiceAggregatedList{ + ret := &NetworkAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -107797,10 +111879,10 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Retrieves the list of all NetworkEdgeSecurityService resources available to the specified project.", - // "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", + // "description": "Retrieves the list of all NetworkAttachment resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/networkAttachments", // "httpMethod": "GET", - // "id": "compute.networkEdgeSecurityServices.aggregatedList", + // "id": "compute.networkAttachments.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -107834,7 +111916,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal // "type": "string" // }, // "project": { - // "description": "Name of the project scoping this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, @@ -107846,9 +111928,9 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/networkEdgeSecurityServices", + // "path": "projects/{project}/aggregated/networkAttachments", // "response": { - // "$ref": "NetworkEdgeSecurityServiceAggregatedList" + // "$ref": "NetworkAttachmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -107862,7 +111944,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEdgeSecurityServiceAggregatedList) error) error { +func (c *NetworkAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkAttachmentAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -107880,29 +111962,29 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Pages(ctx context.Contex } } -// method id "compute.networkEdgeSecurityServices.delete": +// method id "compute.networkAttachments.delete": -type NetworkEdgeSecurityServicesDeleteCall struct { - s *Service - project string - region string - networkEdgeSecurityService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkAttachmentsDeleteCall struct { + s *Service + project string + region string + networkAttachment string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified service. +// Delete: Deletes the specified NetworkAttachment in the given scope // -// - networkEdgeSecurityService: Name of the network edge security -// service to delete. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *NetworkEdgeSecurityServicesService) Delete(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesDeleteCall { - c := &NetworkEdgeSecurityServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkAttachment: Name of the NetworkAttachment resource to +// delete. +// - project: Project ID for this request. +// - region: Name of the region of this request. +func (r *NetworkAttachmentsService) Delete(project string, region string, networkAttachment string) *NetworkAttachmentsDeleteCall { + c := &NetworkAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.networkEdgeSecurityService = networkEdgeSecurityService + c.networkAttachment = networkAttachment return c } @@ -107916,8 +111998,9 @@ func (r *NetworkEdgeSecurityServicesService) Delete(project string, region strin // received, and if so, will ignore the second request. This prevents // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *NetworkEdgeSecurityServicesDeleteCall) RequestId(requestId string) *NetworkEdgeSecurityServicesDeleteCall { +// supported ( 00000000-0000-0000-0000-000000000000). end_interface: +// MixerMutationRequestBuilder +func (c *NetworkAttachmentsDeleteCall) RequestId(requestId string) *NetworkAttachmentsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -107925,7 +112008,7 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) RequestId(requestId string) *Net // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NetworkEdgeSecurityServicesDeleteCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesDeleteCall { +func (c *NetworkAttachmentsDeleteCall) Fields(s ...googleapi.Field) *NetworkAttachmentsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107933,21 +112016,21 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Fields(s ...googleapi.Field) *Ne // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEdgeSecurityServicesDeleteCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesDeleteCall { +func (c *NetworkAttachmentsDeleteCall) Context(ctx context.Context) *NetworkAttachmentsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEdgeSecurityServicesDeleteCall) Header() http.Header { +func (c *NetworkAttachmentsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -107957,7 +112040,7 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Res var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -107965,38 +112048,38 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "networkEdgeSecurityService": c.networkEdgeSecurityService, + "project": c.project, + "region": c.region, + "networkAttachment": c.networkAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEdgeSecurityServices.delete" call. +// Do executes the "compute.networkAttachments.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -108010,18 +112093,18 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the specified service.", - // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "description": "Deletes the specified NetworkAttachment in the given scope", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", // "httpMethod": "DELETE", - // "id": "compute.networkEdgeSecurityServices.delete", + // "id": "compute.networkAttachments.delete", // "parameterOrder": [ // "project", // "region", - // "networkEdgeSecurityService" + // "networkAttachment" // ], // "parameters": { - // "networkEdgeSecurityService": { - // "description": "Name of the network edge security service to delete.", + // "networkAttachment": { + // "description": "Name of the NetworkAttachment resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -108035,19 +112118,19 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region of this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: MixerMutationRequestBuilder", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", // "response": { // "$ref": "Operation" // }, @@ -108059,37 +112142,38 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) } -// method id "compute.networkEdgeSecurityServices.get": +// method id "compute.networkAttachments.get": -type NetworkEdgeSecurityServicesGetCall struct { - s *Service - project string - region string - networkEdgeSecurityService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkAttachmentsGetCall struct { + s *Service + project string + region string + networkAttachment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Gets a specified NetworkEdgeSecurityService. +// Get: Returns the specified NetworkAttachment resource in the given +// scope. // -// - networkEdgeSecurityService: Name of the network edge security -// service to get. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *NetworkEdgeSecurityServicesService) Get(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesGetCall { - c := &NetworkEdgeSecurityServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkAttachment: Name of the NetworkAttachment resource to +// return. +// - project: Project ID for this request. +// - region: Name of the region of this request. +func (r *NetworkAttachmentsService) Get(project string, region string, networkAttachment string) *NetworkAttachmentsGetCall { + c := &NetworkAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.networkEdgeSecurityService = networkEdgeSecurityService + c.networkAttachment = networkAttachment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEdgeSecurityServicesGetCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesGetCall { +func (c *NetworkAttachmentsGetCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108099,7 +112183,7 @@ func (c *NetworkEdgeSecurityServicesGetCall) Fields(s ...googleapi.Field) *Netwo // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworkEdgeSecurityServicesGetCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesGetCall { +func (c *NetworkAttachmentsGetCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -108107,21 +112191,21 @@ func (c *NetworkEdgeSecurityServicesGetCall) IfNoneMatch(entityTag string) *Netw // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *NetworkEdgeSecurityServicesGetCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesGetCall { +func (c *NetworkAttachmentsGetCall) Context(ctx context.Context) *NetworkAttachmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEdgeSecurityServicesGetCall) Header() http.Header { +func (c *NetworkAttachmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -108134,7 +112218,7 @@ func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -108142,40 +112226,40 @@ func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "networkEdgeSecurityService": c.networkEdgeSecurityService, + "project": c.project, + "region": c.region, + "networkAttachment": c.networkAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEdgeSecurityServices.get" call. -// Exactly one of *NetworkEdgeSecurityService or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *NetworkEdgeSecurityService.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.networkAttachments.get" call. +// Exactly one of *NetworkAttachment or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkAttachment.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityService, error) { +func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAttachment, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkEdgeSecurityService{ + ret := &NetworkAttachment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -108187,18 +112271,18 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets a specified NetworkEdgeSecurityService.", - // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "description": "Returns the specified NetworkAttachment resource in the given scope.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", // "httpMethod": "GET", - // "id": "compute.networkEdgeSecurityServices.get", + // "id": "compute.networkAttachments.get", // "parameterOrder": [ // "project", // "region", - // "networkEdgeSecurityService" + // "networkAttachment" // ], // "parameters": { - // "networkEdgeSecurityService": { - // "description": "Name of the network edge security service to get.", + // "networkAttachment": { + // "description": "Name of the NetworkAttachment resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -108212,16 +112296,16 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region of this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", // "response": { - // "$ref": "NetworkEdgeSecurityService" + // "$ref": "NetworkAttachment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -108232,135 +112316,130 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.networkEdgeSecurityServices.insert": +// method id "compute.networkAttachments.getIamPolicy": -type NetworkEdgeSecurityServicesInsertCall struct { - s *Service - project string - region string - networkedgesecurityservice *NetworkEdgeSecurityService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkAttachmentsGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a new service in the specified project using the data -// included in the request. +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. // // - project: Project ID for this request. -// - region: Name of the region scoping this request. 
-func (r *NetworkEdgeSecurityServicesService) Insert(project string, region string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesInsertCall { - c := &NetworkEdgeSecurityServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *NetworkAttachmentsService) GetIamPolicy(project string, region string, resource string) *NetworkAttachmentsGetIamPolicyCall { + c := &NetworkAttachmentsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.networkedgesecurityservice = networkedgesecurityservice - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *NetworkEdgeSecurityServicesInsertCall) RequestId(requestId string) *NetworkEdgeSecurityServicesInsertCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource return c } -// ValidateOnly sets the optional parameter "validateOnly": If true, the -// request will not be committed. -func (c *NetworkEdgeSecurityServicesInsertCall) ValidateOnly(validateOnly bool) *NetworkEdgeSecurityServicesInsertCall { - c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *NetworkAttachmentsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NetworkAttachmentsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEdgeSecurityServicesInsertCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesInsertCall { +func (c *NetworkAttachmentsGetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkAttachmentsGetIamPolicyCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *NetworkEdgeSecurityServicesInsertCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesInsertCall { +func (c *NetworkAttachmentsGetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEdgeSecurityServicesInsertCall) Header() http.Header { +func (c *NetworkAttachmentsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEdgeSecurityServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEdgeSecurityServices.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkAttachments.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworkAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -108372,15 +112451,22 @@ func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a new service in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", - // "httpMethod": "POST", - // "id": "compute.networkEdgeSecurityServices.insert", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.networkAttachments.getIamPolicy", // "parameterOrder": [ // "project", - // "region" + // "region", + // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -108389,70 +112475,55 @@ func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" - // }, - // "validateOnly": { - // "description": "If true, the request will not be committed.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", - // "request": { - // "$ref": "NetworkEdgeSecurityService" - // }, + // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networkEdgeSecurityServices.patch": +// method id "compute.networkAttachments.insert": -type NetworkEdgeSecurityServicesPatchCall struct { - s *Service - project string - region string - networkEdgeSecurityService string - networkedgesecurityservice *NetworkEdgeSecurityService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkAttachmentsInsertCall struct { + s *Service + project string + region string + networkattachment *NetworkAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified policy with the data included in the -// request. +// Insert: Creates a NetworkAttachment in the specified project in the +// given scope using the parameters that are included in the request. // -// - networkEdgeSecurityService: Name of the network edge security -// service to update. // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *NetworkEdgeSecurityServicesService) Patch(project string, region string, networkEdgeSecurityService string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesPatchCall { - c := &NetworkEdgeSecurityServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region of this request. +func (r *NetworkAttachmentsService) Insert(project string, region string, networkattachment *NetworkAttachment) *NetworkAttachmentsInsertCall { + c := &NetworkAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.networkEdgeSecurityService = networkEdgeSecurityService - c.networkedgesecurityservice = networkedgesecurityservice - return c -} - -// Paths sets the optional parameter "paths": -func (c *NetworkEdgeSecurityServicesPatchCall) Paths(paths ...string) *NetworkEdgeSecurityServicesPatchCall { - c.urlParams_.SetMulti("paths", append([]string{}, paths...)) + c.networkattachment = networkattachment return c } @@ -108466,23 +112537,17 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Paths(paths ...string) *NetworkEd // received, and if so, will ignore the second request. This prevents // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *NetworkEdgeSecurityServicesPatchCall) RequestId(requestId string) *NetworkEdgeSecurityServicesPatchCall { +// supported ( 00000000-0000-0000-0000-000000000000). end_interface: +// MixerMutationRequestBuilder +func (c *NetworkAttachmentsInsertCall) RequestId(requestId string) *NetworkAttachmentsInsertCall { c.urlParams_.Set("requestId", requestId) return c } -// UpdateMask sets the optional parameter "updateMask": Indicates fields -// to be updated as part of this request. -func (c *NetworkEdgeSecurityServicesPatchCall) UpdateMask(updateMask string) *NetworkEdgeSecurityServicesPatchCall { - c.urlParams_.Set("updateMask", updateMask) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEdgeSecurityServicesPatchCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesPatchCall { +func (c *NetworkAttachmentsInsertCall) Fields(s ...googleapi.Field) *NetworkAttachmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108490,21 +112555,21 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Fields(s ...googleapi.Field) *Net // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEdgeSecurityServicesPatchCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesPatchCall { +func (c *NetworkAttachmentsInsertCall) Context(ctx context.Context) *NetworkAttachmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEdgeSecurityServicesPatchCall) Header() http.Header { +func (c *NetworkAttachmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEdgeSecurityServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -108512,53 +112577,52 @@ func (c *NetworkEdgeSecurityServicesPatchCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkattachment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "networkEdgeSecurityService": c.networkEdgeSecurityService, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEdgeSecurityServices.patch" call. 
+// Do executes the "compute.networkAttachments.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -108572,28 +112636,15 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Patches the specified policy with the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", - // "httpMethod": "PATCH", - // "id": "compute.networkEdgeSecurityServices.patch", + // "description": "Creates a NetworkAttachment in the specified project in the given scope using the parameters that are included in the request.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments", + // "httpMethod": "POST", + // "id": "compute.networkAttachments.insert", // "parameterOrder": [ // "project", - // "region", - // "networkEdgeSecurityService" + // "region" // ], // "parameters": { - // "networkEdgeSecurityService": { - // "description": "Name of the network edge security service to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "paths": { - // "location": "query", - // "repeated": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -108602,27 +112653,21 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region of this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "updateMask": { - // "description": "Indicates fields to be updated as part of this request.", - // "format": "google-fieldmask", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "path": "projects/{project}/regions/{region}/networkAttachments", // "request": { - // "$ref": "NetworkEdgeSecurityService" + // "$ref": "NetworkAttachment" // }, // "response": { // "$ref": "Operation" @@ -108635,23 +112680,1706 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) } -// method id "compute.networkEndpointGroups.aggregatedList": +// method id "compute.networkAttachments.list": -type NetworkEndpointGroupsAggregatedListCall struct { +type NetworkAttachmentsListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves the list of network endpoint groups and -// sorts them by zone. +// List: Lists the NetworkAttachments for a project in the given scope. // // - project: Project ID for this request. -func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { - c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region of this request. +func (r *NetworkAttachmentsService) List(project string, region string) *NetworkAttachmentsListCall { + c := &NetworkAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. 
For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NetworkAttachmentsListCall) Filter(filter string) *NetworkAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworkAttachmentsListCall) MaxResults(maxResults int64) *NetworkAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NetworkAttachmentsListCall) OrderBy(orderBy string) *NetworkAttachmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NetworkAttachmentsListCall) PageToken(pageToken string) *NetworkAttachmentsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. 
+func (c *NetworkAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkAttachmentsListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkAttachmentsListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkAttachmentsListCall) Context(ctx context.Context) *NetworkAttachmentsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkAttachmentsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkAttachmentsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkAttachments.list" call. +// Exactly one of *NetworkAttachmentList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkAttachmentList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NetworkAttachmentList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the NetworkAttachments for a project in the given scope.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments", + // "httpMethod": "GET", + // "id": "compute.networkAttachments.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region of this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkAttachments", + // "response": { + // "$ref": "NetworkAttachmentList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworkAttachmentsListCall) Pages(ctx context.Context, f func(*NetworkAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.networkAttachments.setIamPolicy": + +type NetworkAttachmentsSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. 
+func (r *NetworkAttachmentsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NetworkAttachmentsSetIamPolicyCall { + c := &NetworkAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkAttachmentsSetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkAttachmentsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkAttachments.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + // "httpMethod": "POST", + // "id": "compute.networkAttachments.setIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networkAttachments.testIamPermissions": + +type NetworkAttachmentsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *NetworkAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkAttachmentsTestIamPermissionsCall { + c := &NetworkAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *NetworkAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkAttachmentsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *NetworkAttachmentsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkAttachmentsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkAttachments.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.networkAttachments.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.networkEdgeSecurityServices.aggregatedList": + +type NetworkEdgeSecurityServicesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all NetworkEdgeSecurityService +// resources available to the specified project. +// +// - project: Name of the project scoping this request. +func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *NetworkEdgeSecurityServicesAggregatedListCall { + c := &NetworkEdgeSecurityServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. 
For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworkEdgeSecurityServicesAggregatedListCall) MaxResults(maxResults int64) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. 
By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) OrderBy(orderBy string) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) PageToken(pageToken string) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *NetworkEdgeSecurityServicesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEdgeSecurityServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEdgeSecurityServices.aggregatedList" call. +// Exactly one of *NetworkEdgeSecurityServiceAggregatedList or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *NetworkEdgeSecurityServiceAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityServiceAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NetworkEdgeSecurityServiceAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all NetworkEdgeSecurityService resources available to the specified project.", + // "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", + // "httpMethod": "GET", + // "id": "compute.networkEdgeSecurityServices.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/networkEdgeSecurityServices", + // "response": { + // "$ref": "NetworkEdgeSecurityServiceAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEdgeSecurityServiceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.networkEdgeSecurityServices.delete": + +type NetworkEdgeSecurityServicesDeleteCall struct { + s *Service + project string + region string + networkEdgeSecurityService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified service. +// +// - networkEdgeSecurityService: Name of the network edge security +// service to delete. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *NetworkEdgeSecurityServicesService) Delete(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesDeleteCall { + c := &NetworkEdgeSecurityServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkEdgeSecurityService = networkEdgeSecurityService + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). 
+func (c *NetworkEdgeSecurityServicesDeleteCall) RequestId(requestId string) *NetworkEdgeSecurityServicesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEdgeSecurityServicesDeleteCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEdgeSecurityServicesDeleteCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkEdgeSecurityServicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "networkEdgeSecurityService": c.networkEdgeSecurityService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEdgeSecurityServices.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified service.", + // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "httpMethod": "DELETE", + // "id": "compute.networkEdgeSecurityServices.delete", + // "parameterOrder": [ + // "project", + // "region", + // "networkEdgeSecurityService" + // ], + // "parameters": { + // "networkEdgeSecurityService": { + // "description": "Name of the network edge security service to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networkEdgeSecurityServices.get": + +type NetworkEdgeSecurityServicesGetCall struct { + s *Service + project string + region string + networkEdgeSecurityService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a specified NetworkEdgeSecurityService. +// +// - networkEdgeSecurityService: Name of the network edge security +// service to get. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
+func (r *NetworkEdgeSecurityServicesService) Get(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesGetCall { + c := &NetworkEdgeSecurityServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkEdgeSecurityService = networkEdgeSecurityService + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEdgeSecurityServicesGetCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkEdgeSecurityServicesGetCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEdgeSecurityServicesGetCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkEdgeSecurityServicesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "networkEdgeSecurityService": c.networkEdgeSecurityService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEdgeSecurityServices.get" call. +// Exactly one of *NetworkEdgeSecurityService or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NetworkEdgeSecurityService.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityService, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NetworkEdgeSecurityService{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a specified NetworkEdgeSecurityService.", + // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "httpMethod": "GET", + // "id": "compute.networkEdgeSecurityServices.get", + // "parameterOrder": [ + // "project", + // "region", + // "networkEdgeSecurityService" + // ], + // "parameters": { + // "networkEdgeSecurityService": { + // "description": "Name of the network edge security service to get.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "response": { + // "$ref": "NetworkEdgeSecurityService" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.networkEdgeSecurityServices.insert": + +type NetworkEdgeSecurityServicesInsertCall struct { + s *Service + project string + region string + networkedgesecurityservice *NetworkEdgeSecurityService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new service in the specified project using the data +// included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *NetworkEdgeSecurityServicesService) Insert(project string, region string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesInsertCall { + c := &NetworkEdgeSecurityServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkedgesecurityservice = networkedgesecurityservice + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. 
If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworkEdgeSecurityServicesInsertCall) RequestId(requestId string) *NetworkEdgeSecurityServicesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *NetworkEdgeSecurityServicesInsertCall) ValidateOnly(validateOnly bool) *NetworkEdgeSecurityServicesInsertCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEdgeSecurityServicesInsertCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEdgeSecurityServicesInsertCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkEdgeSecurityServicesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEdgeSecurityServicesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEdgeSecurityServices.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new service in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + // "httpMethod": "POST", + // "id": "compute.networkEdgeSecurityServices.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + // "request": { + // "$ref": "NetworkEdgeSecurityService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networkEdgeSecurityServices.patch": + +type NetworkEdgeSecurityServicesPatchCall struct { + s *Service + project string + region string + networkEdgeSecurityService string + networkedgesecurityservice *NetworkEdgeSecurityService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified policy with the data included in the +// request. +// +// - networkEdgeSecurityService: Name of the network edge security +// service to update. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
+func (r *NetworkEdgeSecurityServicesService) Patch(project string, region string, networkEdgeSecurityService string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesPatchCall { + c := &NetworkEdgeSecurityServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkEdgeSecurityService = networkEdgeSecurityService + c.networkedgesecurityservice = networkedgesecurityservice + return c +} + +// Paths sets the optional parameter "paths": +func (c *NetworkEdgeSecurityServicesPatchCall) Paths(paths ...string) *NetworkEdgeSecurityServicesPatchCall { + c.urlParams_.SetMulti("paths", append([]string{}, paths...)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworkEdgeSecurityServicesPatchCall) RequestId(requestId string) *NetworkEdgeSecurityServicesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// UpdateMask sets the optional parameter "updateMask": Indicates fields +// to be updated as part of this request. +func (c *NetworkEdgeSecurityServicesPatchCall) UpdateMask(updateMask string) *NetworkEdgeSecurityServicesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEdgeSecurityServicesPatchCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEdgeSecurityServicesPatchCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *NetworkEdgeSecurityServicesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEdgeSecurityServicesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "networkEdgeSecurityService": c.networkEdgeSecurityService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEdgeSecurityServices.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified policy with the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "httpMethod": "PATCH", + // "id": "compute.networkEdgeSecurityServices.patch", + // "parameterOrder": [ + // "project", + // "region", + // "networkEdgeSecurityService" + // ], + // "parameters": { + // "networkEdgeSecurityService": { + // "description": "Name of the network edge security service to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "paths": { + // "location": "query", + // "repeated": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "updateMask": { + // "description": "Indicates fields to be updated as part of this request.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "request": { + // "$ref": "NetworkEdgeSecurityService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networkEndpointGroups.aggregatedList": + +type NetworkEndpointGroupsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of network endpoint groups and +// sorts them by zone. +// +// - project: Project ID for this request. +func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { + c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -108827,17 +114555,17 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -108950,12 +114678,12 @@ type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { // AttachNetworkEndpoints: Attach a list of network endpoints to the // specified network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group where -// you are attaching network endpoints to. It should comply with -// RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group where +// you are attaching network endpoints to. It should comply with +// RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. 
func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109050,17 +114778,17 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -109141,11 +114869,11 @@ type NetworkEndpointGroupsDeleteCall struct { // terminated when the NEG is deleted. Note that the NEG cannot be // deleted if there are backend services referencing it. // -// - networkEndpointGroup: The name of the network endpoint group to -// delete. It should comply with RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group to +// delete. It should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109234,17 +114962,17 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -109321,11 +115049,11 @@ type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { // DetachNetworkEndpoints: Detach a list of network endpoints from the // specified network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group where -// you are removing network endpoints. It should comply with RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group where +// you are removing network endpoints. It should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. 
func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109420,17 +115148,17 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -109510,11 +115238,11 @@ type NetworkEndpointGroupsGetCall struct { // Get: Returns the specified network endpoint group. Gets a list of // available network endpoint groups by making a list() request. // -// - networkEndpointGroup: The name of the network endpoint group. It -// should comply with RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group. It +// should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109600,17 +115328,17 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ @@ -109682,9 +115410,9 @@ type NetworkEndpointGroupsInsertCall struct { // Insert: Creates a network endpoint group in the specified project // using the parameters that are included in the request. // -// - project: Project ID for this request. -// - zone: The name of the zone where you want to create the network -// endpoint group. It should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the network +// endpoint group. It should comply with RFC1035. 
func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109777,17 +115505,17 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -109859,9 +115587,9 @@ type NetworkEndpointGroupsListCall struct { // List: Retrieves the list of network endpoint groups that are located // in the specified project and zone. // -// - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110027,17 +115755,17 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ @@ -110152,12 +115880,12 @@ type NetworkEndpointGroupsListNetworkEndpointsCall struct { // ListNetworkEndpoints: Lists the network endpoints in the specified // network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group from -// which you want to generate a list of included network endpoints. It -// should comply with RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group from +// which you want to generate a list of included network endpoints. It +// should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. 
func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110320,17 +116048,17 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroupsListNetworkEndpoints{ ServerResponse: googleapi.ServerResponse{ @@ -110536,17 +116264,17 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -110727,17 +116455,17 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -110926,17 +116654,17 @@ func (c *NetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -111114,17 +116842,17 @@ func (c *NetworkFirewallPoliciesCloneRulesCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -111285,17 +117013,17 @@ func (c *NetworkFirewallPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: 
googleapi.ServerResponse{ @@ -111449,17 +117177,17 @@ func (c *NetworkFirewallPoliciesGetCall) Do(opts ...googleapi.CallOption) (*Fire if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -111524,9 +117252,9 @@ type NetworkFirewallPoliciesGetAssociationCall struct { // GetAssociation: Gets an association with the specified name. // -// - firewallPolicy: Name of the firewall policy to which the queried -// association belongs. -// - project: Project ID for this request. +// - firewallPolicy: Name of the firewall policy to which the queried +// association belongs. +// - project: Project ID for this request. func (r *NetworkFirewallPoliciesService) GetAssociation(project string, firewallPolicy string) *NetworkFirewallPoliciesGetAssociationCall { c := &NetworkFirewallPoliciesGetAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111617,17 +117345,17 @@ func (c *NetworkFirewallPoliciesGetAssociationCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyAssociation{ ServerResponse: googleapi.ServerResponse{ @@ -111790,17 +117518,17 @@ func (c *NetworkFirewallPoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -111871,9 +117599,9 @@ type NetworkFirewallPoliciesGetRuleCall struct { // GetRule: Gets a rule of the specified priority. // -// - firewallPolicy: Name of the firewall policy to which the queried -// rule belongs. -// - project: Project ID for this request. +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. +// - project: Project ID for this request. 
func (r *NetworkFirewallPoliciesService) GetRule(project string, firewallPolicy string) *NetworkFirewallPoliciesGetRuleCall { c := &NetworkFirewallPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111964,17 +117692,17 @@ func (c *NetworkFirewallPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyRule{ ServerResponse: googleapi.ServerResponse{ @@ -112136,17 +117864,17 @@ func (c *NetworkFirewallPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -112374,17 +118102,17 @@ func (c *NetworkFirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*Fir if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyList{ ServerResponse: googleapi.ServerResponse{ @@ -112585,17 +118313,17 @@ func (c *NetworkFirewallPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -112768,17 +118496,17 @@ func (c *NetworkFirewallPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -112951,17 +118679,17 @@ func (c *NetworkFirewallPoliciesRemoveAssociationCall) Do(opts ...googleapi.Call if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -113129,17 +118857,17 @@ func (c *NetworkFirewallPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() 
} - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -113293,17 +119021,17 @@ func (c *NetworkFirewallPoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -113449,17 +119177,17 @@ func (c *NetworkFirewallPoliciesTestIamPermissionsCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -113621,17 +119349,17 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -113790,17 +119518,17 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -113955,17 +119683,17 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Network{ ServerResponse: googleapi.ServerResponse{ @@ -114117,17 +119845,17 @@ func (c *NetworksGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*N if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworksGetEffectiveFirewallsResponse{ ServerResponse: 
googleapi.ServerResponse{ @@ -114283,17 +120011,17 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -114521,17 +120249,17 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkList{ ServerResponse: googleapi.ServerResponse{ @@ -114651,8 +120379,9 @@ func (r *NetworksService) ListPeeringRoutes(project string, network string) *Net // the exchanged routes. // // Possible values: -// "INCOMING" - For routes exported from peer network. -// "OUTGOING" - For routes exported from local network. +// +// "INCOMING" - For routes exported from peer network. +// "OUTGOING" - For routes exported from local network. func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksListPeeringRoutesCall { c.urlParams_.Set("direction", direction) return c @@ -114831,17 +120560,17 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ExchangedPeeringRoutesList{ ServerResponse: googleapi.ServerResponse{ @@ -115074,17 +120803,17 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -115250,17 +120979,17 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -115420,17 +121149,17 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer 
googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -115502,9 +121231,9 @@ type NetworksUpdatePeeringCall struct { // NetworkPeering.export_custom_routes field and the // NetworkPeering.import_custom_routes field. // -// - network: Name of the network resource which the updated peering is -// belonging to. -// - project: Project ID for this request. +// - network: Name of the network resource which the updated peering is +// belonging to. +// - project: Project ID for this request. func (r *NetworksService) UpdatePeering(project string, network string, networksupdatepeeringrequest *NetworksUpdatePeeringRequest) *NetworksUpdatePeeringCall { c := &NetworksUpdatePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -115597,17 +121326,17 @@ func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -115777,17 +121506,17 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -116044,17 +121773,17 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -116256,17 +121985,17 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -116344,10 +122073,10 @@ type NodeGroupsDeleteNodesCall struct { // DeleteNodes: Deletes specified nodes from the node group. // -// - nodeGroup: Name of the NodeGroup resource whose nodes will be -// deleted. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - nodeGroup: Name of the NodeGroup resource whose nodes will be +// deleted. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -116442,17 +122171,17 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -116623,17 +122352,17 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeGroup{ ServerResponse: googleapi.ServerResponse{ @@ -116803,17 +122532,17 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -116989,17 +122718,17 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -117248,17 +122977,17 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeGroupList{ ServerResponse: googleapi.ServerResponse{ @@ -117372,10 +123101,10 @@ type NodeGroupsListNodesCall struct { // ListNodes: Lists nodes in the node group. // -// - nodeGroup: Name of the NodeGroup resource whose nodes you want to -// list. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - nodeGroup: Name of the NodeGroup resource whose nodes you want to +// list. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup string) *NodeGroupsListNodesCall { c := &NodeGroupsListNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -117530,17 +123259,17 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeGroupsListNodes{ ServerResponse: googleapi.ServerResponse{ @@ -117760,17 +123489,17 @@ func (c *NodeGroupsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -117933,17 +123662,17 @@ func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -118116,17 +123845,17 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -118289,17 +124018,17 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -118551,17 +124280,17 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeTemplateAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -118763,17 +124492,17 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - 
return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -118940,17 +124669,17 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeTemplate{ ServerResponse: googleapi.ServerResponse{ @@ -119120,17 +124849,17 @@ func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -119304,17 +125033,17 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -119554,17 +125283,17 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeTemplateList{ ServerResponse: googleapi.ServerResponse{ @@ -119761,17 +125490,17 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -119929,17 +125658,17 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ 
-120191,17 +125920,17 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -120402,17 +126131,17 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeType{ ServerResponse: googleapi.ServerResponse{ @@ -120653,17 +126382,17 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeTypeList{ ServerResponse: googleapi.ServerResponse{ @@ -120953,17 +126682,17 @@ func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PacketMirroringAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -121165,17 +126894,17 @@ func (c *PacketMirroringsDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -121341,17 +127070,17 @@ func (c *PacketMirroringsGetCall) Do(opts ...googleapi.CallOption) (*PacketMirro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PacketMirroring{ ServerResponse: googleapi.ServerResponse{ @@ -121519,17 +127248,17 @@ func (c *PacketMirroringsInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err 
:= googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -121769,17 +127498,17 @@ func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirr if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PacketMirroringList{ ServerResponse: googleapi.ServerResponse{ @@ -121993,17 +127722,17 @@ func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -122166,17 +127895,17 @@ func (c *PacketMirroringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -122335,17 +128064,17 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -122497,17 +128226,17 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -122654,17 +128383,17 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -122817,17 +128546,17 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, 
gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -122979,17 +128708,17 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Project{ ServerResponse: googleapi.ServerResponse{ @@ -123128,17 +128857,17 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Project{ ServerResponse: googleapi.ServerResponse{ @@ -123358,17 +129087,17 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ProjectsGetXpnResources{ ServerResponse: googleapi.ServerResponse{ @@ -123630,17 +129359,17 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &XpnHostList{ ServerResponse: googleapi.ServerResponse{ @@ -123838,17 +129567,17 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -124007,17 +129736,17 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -124172,17 +129901,17 @@ func (c 
*ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -124339,17 +130068,17 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -124506,17 +130235,17 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -124582,9 +130311,9 @@ type PublicAdvertisedPrefixesDeleteCall struct { // Delete: Deletes the specified PublicAdvertisedPrefix // -// - project: Project ID for this request. -// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource -// to delete. +// - project: Project ID for this request. +// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource +// to delete. func (r *PublicAdvertisedPrefixesService) Delete(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesDeleteCall { c := &PublicAdvertisedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -124671,17 +130400,17 @@ func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -124750,9 +130479,9 @@ type PublicAdvertisedPrefixesGetCall struct { // Get: Returns the specified PublicAdvertisedPrefix resource. // -// - project: Project ID for this request. -// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource -// to return. +// - project: Project ID for this request. +// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource +// to return. 
func (r *PublicAdvertisedPrefixesService) Get(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesGetCall { c := &PublicAdvertisedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -124836,17 +130565,17 @@ func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Pub if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicAdvertisedPrefix{ ServerResponse: googleapi.ServerResponse{ @@ -125002,17 +130731,17 @@ func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -125239,17 +130968,17 @@ func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pu if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicAdvertisedPrefixList{ ServerResponse: googleapi.ServerResponse{ @@ -125357,9 +131086,9 @@ type PublicAdvertisedPrefixesPatchCall struct { // in the request. This method supports PATCH semantics and uses JSON // merge patch format and processing rules. // -// - project: Project ID for this request. -// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource -// to patch. +// - project: Project ID for this request. +// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource +// to patch. 
func (r *PublicAdvertisedPrefixesService) Patch(project string, publicAdvertisedPrefix string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesPatchCall { c := &PublicAdvertisedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -125452,17 +131181,17 @@ func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -125712,17 +131441,17 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicDelegatedPrefixAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -125834,10 +131563,10 @@ type PublicDelegatedPrefixesDeleteCall struct { // Delete: Deletes the specified PublicDelegatedPrefix in the given // region. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to delete. -// - region: Name of the region of this request. +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to delete. +// - region: Name of the region of this request. func (r *PublicDelegatedPrefixesService) Delete(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesDeleteCall { c := &PublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -125926,17 +131655,17 @@ func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -126015,10 +131744,10 @@ type PublicDelegatedPrefixesGetCall struct { // Get: Returns the specified PublicDelegatedPrefix resource in the // given region. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to return. -// - region: Name of the region of this request. +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to return. +// - region: Name of the region of this request. 
func (r *PublicDelegatedPrefixesService) Get(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesGetCall { c := &PublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -126104,17 +131833,17 @@ func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Publ if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicDelegatedPrefix{ ServerResponse: googleapi.ServerResponse{ @@ -126283,17 +132012,17 @@ func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -126533,17 +132262,17 @@ func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pub if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicDelegatedPrefixList{ ServerResponse: googleapi.ServerResponse{ @@ -126660,10 +132389,10 @@ type PublicDelegatedPrefixesPatchCall struct { // data included in the request. This method supports PATCH semantics // and uses JSON merge patch format and processing rules. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to patch. -// - region: Name of the region for this request. +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to patch. +// - region: Name of the region for this request. 
func (r *PublicDelegatedPrefixesService) Patch(project string, region string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesPatchCall { c := &PublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -126758,17 +132487,17 @@ func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -126939,17 +132668,17 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -127115,17 +132844,17 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Autoscaler{ ServerResponse: googleapi.ServerResponse{ @@ -127293,17 +133022,17 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -127543,17 +133272,17 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionAutoscalerList{ ServerResponse: googleapi.ServerResponse{ @@ -127770,17 +133499,17 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -127960,17 +133689,17 @@ func (c *RegionAutoscalersUpdateCall) Do(opts 
...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -128139,17 +133868,17 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -128315,17 +134044,17 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ @@ -128400,10 +134129,10 @@ type RegionBackendServicesGetHealthCall struct { // GetHealth: Gets the most recent health check results for this // regional BackendService. // -// - backendService: Name of the BackendService resource for which to -// get health. -// - project: . -// - region: Name of the region scoping this request. +// - backendService: Name of the BackendService resource for which to +// get health. +// - project: . +// - region: Name of the region scoping this request. func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -128482,17 +134211,17 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendServiceGroupHealth{ ServerResponse: googleapi.ServerResponse{ @@ -128553,6 +134282,192 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* } +// method id "compute.regionBackendServices.getIamPolicy": + +type RegionBackendServicesGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. 
+func (r *RegionBackendServicesService) GetIamPolicy(project string, region string, resource string) *RegionBackendServicesGetIamPolicyCall { + c := &RegionBackendServicesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *RegionBackendServicesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *RegionBackendServicesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionBackendServicesGetIamPolicyCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionBackendServicesGetIamPolicyCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionBackendServicesGetIamPolicyCall) Context(ctx context.Context) *RegionBackendServicesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionBackendServicesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionBackendServices.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *RegionBackendServicesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.regionBackendServices.getIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.regionBackendServices.insert": type RegionBackendServicesInsertCall struct { @@ -128663,17 +134578,17 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -128913,17 +134828,17 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, 
gensupport.WrapError(err) } ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ @@ -129138,17 +135053,17 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -129214,6 +135129,174 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } +// method id "compute.regionBackendServices.setIamPolicy": + +type RegionBackendServicesSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *RegionBackendServicesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionBackendServicesSetIamPolicyCall { + c := &RegionBackendServicesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionBackendServicesSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionBackendServicesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionBackendServicesSetIamPolicyCall) Context(ctx context.Context) *RegionBackendServicesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionBackendServicesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionBackendServices.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionBackendServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", + // "httpMethod": "POST", + // "id": "compute.regionBackendServices.setIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionBackendServices.update": type RegionBackendServicesUpdateCall struct { @@ -129328,17 +135411,17 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); 
err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -129595,17 +135678,17 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &CommitmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -129806,17 +135889,17 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Commitment{ ServerResponse: googleapi.ServerResponse{ @@ -129984,17 +136067,17 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -130234,17 +136317,17 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &CommitmentList{ ServerResponse: googleapi.ServerResponse{ @@ -130362,10 +136445,10 @@ type RegionCommitmentsUpdateCall struct { // part of update-mask. Only the following fields can be modified: // auto_renew. // -// - commitment: Name of the commitment for which auto renew is being -// updated. -// - project: Project ID for this request. -// - region: Name of the region for this request. +// - commitment: Name of the commitment for which auto renew is being +// updated. +// - project: Project ID for this request. +// - region: Name of the region for this request. 
func (r *RegionCommitmentsService) Update(project string, region string, commitment string, commitment2 *Commitment) *RegionCommitmentsUpdateCall { c := &RegionCommitmentsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -130473,17 +136556,17 @@ func (c *RegionCommitmentsUpdateCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -130664,17 +136747,17 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskType{ ServerResponse: googleapi.ServerResponse{ @@ -130915,17 +136998,17 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionDiskTypeList{ ServerResponse: googleapi.ServerResponse{ @@ -131139,17 +137222,17 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -131330,17 +137413,17 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -131514,17 +137597,17 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -131689,17 +137772,17 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { if res.Body != nil { 
res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Disk{ ServerResponse: googleapi.ServerResponse{ @@ -131869,17 +137952,17 @@ func (c *RegionDisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -132060,17 +138143,17 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -132315,17 +138398,17 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskList{ ServerResponse: googleapi.ServerResponse{ @@ -132538,17 +138621,17 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -132726,17 +138809,17 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -132899,17 +138982,17 @@ func (c *RegionDisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -133082,17 
+139165,17 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -133255,17 +139338,17 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -133341,10 +139424,10 @@ type RegionHealthCheckServicesDeleteCall struct { // Delete: Deletes the specified regional HealthCheckService. // -// - healthCheckService: Name of the HealthCheckService to delete. The -// name must be 1-63 characters long, and comply with RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - healthCheckService: Name of the HealthCheckService to delete. The +// name must be 1-63 characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionHealthCheckServicesService) Delete(project string, region string, healthCheckService string) *RegionHealthCheckServicesDeleteCall { c := &RegionHealthCheckServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -133433,17 +139516,17 @@ func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -133520,10 +139603,10 @@ type RegionHealthCheckServicesGetCall struct { // Get: Returns the specified regional HealthCheckService resource. // -// - healthCheckService: Name of the HealthCheckService to update. The -// name must be 1-63 characters long, and comply with RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - healthCheckService: Name of the HealthCheckService to update. The +// name must be 1-63 characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionHealthCheckServicesService) Get(project string, region string, healthCheckService string) *RegionHealthCheckServicesGetCall { c := &RegionHealthCheckServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -133609,17 +139692,17 @@ func (c *RegionHealthCheckServicesGetCall) Do(opts ...googleapi.CallOption) (*He if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthCheckService{ ServerResponse: googleapi.ServerResponse{ @@ -133786,17 +139869,17 @@ func (c *RegionHealthCheckServicesInsertCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -134036,17 +140119,17 @@ func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*H if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthCheckServicesList{ ServerResponse: googleapi.ServerResponse{ @@ -134163,10 +140246,10 @@ type RegionHealthCheckServicesPatchCall struct { // with the data included in the request. This method supports PATCH // semantics and uses the JSON merge patch format and processing rules. // -// - healthCheckService: Name of the HealthCheckService to update. The -// name must be 1-63 characters long, and comply with RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - healthCheckService: Name of the HealthCheckService to update. The +// name must be 1-63 characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionHealthCheckServicesService) Patch(project string, region string, healthCheckService string, healthcheckservice *HealthCheckService) *RegionHealthCheckServicesPatchCall { c := &RegionHealthCheckServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -134261,17 +140344,17 @@ func (c *RegionHealthCheckServicesPatchCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -134441,17 +140524,17 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -134618,17 +140701,17 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ @@ -134796,17 +140879,17 @@ func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -135046,17 +141129,17 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ @@ -135270,17 +141353,17 @@ func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -135459,17 +141542,17 @@ func (c *RegionHealthChecksUpdateCall) Do(opts 
...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -135660,17 +141743,17 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -135750,11 +141833,11 @@ type RegionInstanceGroupManagersApplyUpdatesToInstancesCall struct { // ApplyUpdatesToInstances: Apply updates to selected instances the // managed instance group. // -// - instanceGroupManager: The name of the managed instance group, -// should conform to RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. +// - instanceGroupManager: The name of the managed instance group, +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. func (r *RegionInstanceGroupManagersService) ApplyUpdatesToInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { c := &RegionInstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -135833,17 +141916,17 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -135923,11 +142006,11 @@ type RegionInstanceGroupManagersCreateInstancesCall struct { // the status of the creating or actions with the listmanagedinstances // method. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - region: The name of the region where the managed instance group is -// located. It should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the managed instance group is +// located. It should conform to RFC1035. 
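The recurring change in these hunks is that every generated `Do` method now routes errors through `gensupport.WrapError` instead of returning a bare `*googleapi.Error` or the raw `CheckResponse` error. A minimal caller-side sketch of what that means for downstream code, assuming (as the wrapper intends) that the wrapped error still matches `*googleapi.Error` via `errors.As`; the project, region, and disk names are placeholders:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getDiskStatus shows that existing *googleapi.Error checks keep working
// against the wrapped errors returned by the regenerated client.
func getDiskStatus(ctx context.Context) error {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return err
	}
	disk, err := svc.RegionDisks.Get("my-project", "us-central1", "my-disk").Context(ctx).Do()
	if err != nil {
		var gerr *googleapi.Error
		// errors.As unwraps through gensupport.WrapError's wrapper type.
		if errors.As(err, &gerr) && gerr.Code == http.StatusNotFound {
			fmt.Println("disk not found")
			return nil
		}
		return err
	}
	fmt.Println(disk.Status)
	return nil
}
```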
func (r *RegionInstanceGroupManagersService) CreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest) *RegionInstanceGroupManagersCreateInstancesCall { c := &RegionInstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -136021,17 +142104,17 @@ func (c *RegionInstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.Ca if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -136201,17 +142284,17 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -136396,17 +142479,17 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -136486,11 +142569,11 @@ type RegionInstanceGroupManagersDeletePerInstanceConfigsCall struct { // DeletePerInstanceConfigs: Deletes selected per-instance // configurations for the managed instance group. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. 
func (r *RegionInstanceGroupManagersService) DeletePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { c := &RegionInstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -136569,17 +142652,17 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...goo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -136742,17 +142825,17 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ @@ -136924,17 +143007,17 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -137173,17 +143256,17 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionInstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ @@ -137299,13 +143382,13 @@ type RegionInstanceGroupManagersListErrorsCall struct { // given regional managed instance group. The filter and orderBy query // parameters are not supported. // -// - instanceGroupManager: The name of the managed instance group. It -// must be a string that meets the requirements in RFC1035, or an -// unsigned long integer: must match regexp pattern: (?:a-z -// (?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. This should -// conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// must be a string that meets the requirements in RFC1035, or an +// unsigned long integer: must match regexp pattern: (?:a-z +// (?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. This should +// conform to RFC1035. 
func (r *RegionInstanceGroupManagersService) ListErrors(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListErrorsCall { c := &RegionInstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -137475,17 +143558,17 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionInstanceGroupManagersListErrorsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -137606,7 +143689,10 @@ type RegionInstanceGroupManagersListManagedInstancesCall struct { // ListManagedInstances: Lists the instances in the managed instance // group and instances that are scheduled to be created. The list // includes any current actions that the group has scheduled for its -// instances. The orderBy query parameter is not supported. +// instances. The orderBy query parameter is not supported. The +// `pageToken` query parameter is supported only in the alpha and beta +// API and only if the group's `listManagedInstancesResults` field is +// set to `PAGINATED`. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -137767,17 +143853,17 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionInstanceGroupManagersListInstancesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -137791,7 +143877,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -137899,11 +143985,11 @@ type RegionInstanceGroupManagersListPerInstanceConfigsCall struct { // defined for the managed instance group. The orderBy query parameter // is not supported. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. 
+// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c := &RegionInstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -138060,17 +144146,17 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionInstanceGroupManagersListInstanceConfigsResp{ ServerResponse: googleapi.ServerResponse{ @@ -138298,17 +144384,17 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -138390,11 +144476,11 @@ type RegionInstanceGroupManagersPatchPerInstanceConfigsCall struct { // serves as a key used to distinguish whether to perform insert or // patch. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. func (r *RegionInstanceGroupManagersService) PatchPerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { c := &RegionInstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -138489,17 +144575,17 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -138685,17 +144771,17 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. 
if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -138782,11 +144868,11 @@ type RegionInstanceGroupManagersResizeCall struct { // draining, it can take up to 60 seconds after the connection draining // duration has elapsed before the VM instance is removed or deleted. // -// - instanceGroupManager: Name of the managed instance group. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - size: Number of instances that should exist in this instance group -// manager. +// - instanceGroupManager: Name of the managed instance group. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - size: Number of instances that should exist in this instance group +// manager. func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -138876,17 +144962,17 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -139070,17 +145156,17 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -139258,17 +145344,17 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -139350,11 +145436,11 @@ type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { // serves as a key used to distinguish whether to perform insert or // patch. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. 
+// - region: Name of the region scoping this request, should conform to +// RFC1035. func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -139449,17 +145535,17 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -139626,17 +145712,17 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ @@ -139875,17 +145961,17 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionInstanceGroupList{ ServerResponse: googleapi.ServerResponse{ @@ -140003,10 +146089,10 @@ type RegionInstanceGroupsListInstancesCall struct { // instances that are running. The orderBy query parameter is not // supported. // -// - instanceGroup: Name of the regional instance group for which we -// want to list the instances. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - instanceGroup: Name of the regional instance group for which we +// want to list the instances. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
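Stepping back to the `ListManagedInstances` hunks a little earlier: the regenerated description adds the caveat that the `pageToken` query parameter is honored only in the alpha/beta APIs, and only when the group's `listManagedInstancesResults` field is `PAGINATED`. A minimal sketch of calling the v1 method without relying on paging; the project, region, and group names are placeholders, and the response field names are from the v1 surface as I recall them:

```go
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listManagedInstances prints the instances managed by a regional MIG.
// In the v1 surface a single call returns the full list; per the updated
// description, pageToken only takes effect in alpha/beta with
// listManagedInstancesResults set to PAGINATED.
func listManagedInstances(ctx context.Context) error {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return err
	}
	resp, err := svc.RegionInstanceGroupManagers.
		ListManagedInstances("my-project", "us-central1", "my-mig").
		Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, mi := range resp.ManagedInstances {
		fmt.Println(mi.Instance, mi.InstanceStatus)
	}
	return nil
}
```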
func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -140168,17 +146254,17 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionInstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ @@ -140303,10 +146389,10 @@ type RegionInstanceGroupsSetNamedPortsCall struct { // SetNamedPorts: Sets the named ports for the specified regional // instance group. // -// - instanceGroup: The name of the regional instance group where the -// named ports are updated. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - instanceGroup: The name of the regional instance group where the +// named ports are updated. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -140401,17 +146487,17 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -140584,17 +146670,17 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -140668,11 +146754,11 @@ type RegionNetworkEndpointGroupsDeleteCall struct { // NEG cannot be deleted if it is configured as a backend of a backend // service. // -// - networkEndpointGroup: The name of the network endpoint group to -// delete. It should comply with RFC1035. -// - project: Project ID for this request. -// - region: The name of the region where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group to +// delete. It should comply with RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the network endpoint group is +// located. 
It should comply with RFC1035. func (r *RegionNetworkEndpointGroupsService) Delete(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsDeleteCall { c := &RegionNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -140761,17 +146847,17 @@ func (c *RegionNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -140848,11 +146934,11 @@ type RegionNetworkEndpointGroupsGetCall struct { // Get: Returns the specified network endpoint group. Gets a list of // available network endpoint groups by making a list() request. // -// - networkEndpointGroup: The name of the network endpoint group. It -// should comply with RFC1035. -// - project: Project ID for this request. -// - region: The name of the region where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group. It +// should comply with RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the network endpoint group is +// located. It should comply with RFC1035. func (r *RegionNetworkEndpointGroupsService) Get(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsGetCall { c := &RegionNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -140938,17 +147024,17 @@ func (c *RegionNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ @@ -141020,9 +147106,9 @@ type RegionNetworkEndpointGroupsInsertCall struct { // Insert: Creates a network endpoint group in the specified project // using the parameters that are included in the request. // -// - project: Project ID for this request. -// - region: The name of the region where you want to create the network -// endpoint group. It should comply with RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where you want to create the network +// endpoint group. It should comply with RFC1035. 
func (r *RegionNetworkEndpointGroupsService) Insert(project string, region string, networkendpointgroup *NetworkEndpointGroup) *RegionNetworkEndpointGroupsInsertCall { c := &RegionNetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -141115,17 +147201,17 @@ func (c *RegionNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -141197,9 +147283,9 @@ type RegionNetworkEndpointGroupsListCall struct { // List: Retrieves the list of regional network endpoint groups // available to the specified project in the given region. // -// - project: Project ID for this request. -// - region: The name of the region where the network endpoint group is -// located. It should comply with RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the network endpoint group is +// located. It should comply with RFC1035. func (r *RegionNetworkEndpointGroupsService) List(project string, region string) *RegionNetworkEndpointGroupsListCall { c := &RegionNetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -141365,17 +147451,17 @@ func (c *RegionNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ @@ -141596,17 +147682,17 @@ func (c *RegionNetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -141807,17 +147893,17 @@ func (c *RegionNetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -142007,17 +148093,17 @@ func (c *RegionNetworkFirewallPoliciesCloneRulesCall) Do(opts ...googleapi.CallO if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != 
nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -142190,17 +148276,17 @@ func (c *RegionNetworkFirewallPoliciesDeleteCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -142366,17 +148452,17 @@ func (c *RegionNetworkFirewallPoliciesGetCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -142450,10 +148536,10 @@ type RegionNetworkFirewallPoliciesGetAssociationCall struct { // GetAssociation: Gets an association with the specified name. // -// - firewallPolicy: Name of the firewall policy to which the queried -// association belongs. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - firewallPolicy: Name of the firewall policy to which the queried +// association belongs. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionNetworkFirewallPoliciesService) GetAssociation(project string, region string, firewallPolicy string) *RegionNetworkFirewallPoliciesGetAssociationCall { c := &RegionNetworkFirewallPoliciesGetAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -142546,17 +148632,17 @@ func (c *RegionNetworkFirewallPoliciesGetAssociationCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyAssociation{ ServerResponse: googleapi.ServerResponse{ @@ -142725,17 +148811,17 @@ func (c *RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall) Do(opts ...goog if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -142904,17 +148990,17 @@ func (c *RegionNetworkFirewallPoliciesGetIamPolicyCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, 
err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -142994,10 +149080,10 @@ type RegionNetworkFirewallPoliciesGetRuleCall struct { // GetRule: Gets a rule of the specified priority. // -// - firewallPolicy: Name of the firewall policy to which the queried -// rule belongs. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionNetworkFirewallPoliciesService) GetRule(project string, region string, firewallPolicy string) *RegionNetworkFirewallPoliciesGetRuleCall { c := &RegionNetworkFirewallPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -143090,17 +149176,17 @@ func (c *RegionNetworkFirewallPoliciesGetRuleCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyRule{ ServerResponse: googleapi.ServerResponse{ @@ -143274,17 +149360,17 @@ func (c *RegionNetworkFirewallPoliciesInsertCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -143524,17 +149610,17 @@ func (c *RegionNetworkFirewallPoliciesListCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyList{ ServerResponse: googleapi.ServerResponse{ @@ -143746,17 +149832,17 @@ func (c *RegionNetworkFirewallPoliciesPatchCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -143941,17 +150027,17 @@ func (c *RegionNetworkFirewallPoliciesPatchRuleCall) Do(opts ...googleapi.CallOp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -144136,17 
+150222,17 @@ func (c *RegionNetworkFirewallPoliciesRemoveAssociationCall) Do(opts ...googleap if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -144326,17 +150412,17 @@ func (c *RegionNetworkFirewallPoliciesRemoveRuleCall) Do(opts ...googleapi.CallO if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -144502,17 +150588,17 @@ func (c *RegionNetworkFirewallPoliciesSetIamPolicyCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -144670,17 +150756,17 @@ func (c *RegionNetworkFirewallPoliciesTestIamPermissionsCall) Do(opts ...googlea if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -144757,10 +150843,10 @@ type RegionNotificationEndpointsDeleteCall struct { // Delete: Deletes the specified NotificationEndpoint in the given // region // -// - notificationEndpoint: Name of the NotificationEndpoint resource to -// delete. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - notificationEndpoint: Name of the NotificationEndpoint resource to +// delete. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionNotificationEndpointsService) Delete(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsDeleteCall { c := &RegionNotificationEndpointsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -144849,17 +150935,17 @@ func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -144938,10 +151024,10 @@ type RegionNotificationEndpointsGetCall struct { // Get: Returns the specified NotificationEndpoint resource in the given // region. // -// - notificationEndpoint: Name of the NotificationEndpoint resource to -// return. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - notificationEndpoint: Name of the NotificationEndpoint resource to +// return. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionNotificationEndpointsService) Get(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsGetCall { c := &RegionNotificationEndpointsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -145027,17 +151113,17 @@ func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NotificationEndpoint{ ServerResponse: googleapi.ServerResponse{ @@ -145205,17 +151291,17 @@ func (c *RegionNotificationEndpointsInsertCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -145455,17 +151541,17 @@ func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NotificationEndpointList{ ServerResponse: googleapi.ServerResponse{ @@ -145649,7 +151735,7 @@ func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -145797,17 +151883,17 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, if 
res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -146048,17 +152134,17 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ @@ -146112,7 +152198,1008 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/operations", + // "response": { + // "$ref": "OperationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionOperations.wait": + +type RegionOperationsWaitCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Wait: Waits for the specified Operation resource to return as `DONE` +// or for the request to approach the 2 minute deadline, and retrieves +// the specified Operation resource. This method differs from the `GET` +// method in that it waits for no more than the default deadline (2 +// minutes) and then returns the current state of the operation, which +// might be `DONE` or still in progress. This method is called on a +// best-effort basis. Specifically: - In uncommon cases, when the server +// is overloaded, the request might return before the default deadline +// is reached, or might return after zero seconds. - If the default +// deadline is reached, there is no guarantee that the operation is +// actually done when the method returns. Be prepared to retry if the +// operation is not `DONE`. 
+// +// - operation: Name of the Operations resource to return. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *RegionOperationsService) Wait(project string, region string, operation string) *RegionOperationsWaitCall { + c := &RegionOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionOperationsWaitCall) Fields(s ...googleapi.Field) *RegionOperationsWaitCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionOperationsWaitCall) Context(ctx context.Context) *RegionOperationsWaitCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionOperationsWaitCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/operations/{operation}/wait") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "operation": c.operation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionOperations.wait" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress. This method is called on a best-effort basis. Specifically: - In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. - If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`. ", + // "flatPath": "projects/{project}/regions/{region}/operations/{operation}/wait", + // "httpMethod": "POST", + // "id": "compute.regionOperations.wait", + // "parameterOrder": [ + // "project", + // "region", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/operations/{operation}/wait", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionSecurityPolicies.delete": + +type RegionSecurityPoliciesDeleteCall struct { + s *Service + project string + region string + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified policy. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - securityPolicy: Name of the security policy to delete. 
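The compute.regionOperations.wait method added above is documented as best-effort: it returns after at most roughly two minutes and possibly before the operation is DONE, so callers are told to retry. A minimal polling sketch under that reading; svc and the name arguments are placeholders supplied by the caller:

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// waitForDone keeps calling compute.regionOperations.wait until the operation
// reports DONE, since a single Wait call may return while it is still running.
func waitForDone(ctx context.Context, svc *compute.Service, project, region, opName string) (*compute.Operation, error) {
	for {
		op, err := svc.RegionOperations.Wait(project, region, opName).Context(ctx).Do()
		if err != nil {
			return nil, err
		}
		if op.Status == "DONE" {
			if op.Error != nil {
				return op, fmt.Errorf("operation %s finished with errors", op.Name)
			}
			return op, nil
		}
	}
}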
+func (r *RegionSecurityPoliciesService) Delete(project string, region string, securityPolicy string) *RegionSecurityPoliciesDeleteCall { + c := &RegionSecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.securityPolicy = securityPolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionSecurityPoliciesDeleteCall) RequestId(requestId string) *RegionSecurityPoliciesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSecurityPoliciesDeleteCall) Context(ctx context.Context) *RegionSecurityPoliciesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSecurityPoliciesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSecurityPolicies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *RegionSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified policy.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "httpMethod": "DELETE", + // "id": "compute.regionSecurityPolicies.delete", + // "parameterOrder": [ + // "project", + // "region", + // "securityPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionSecurityPolicies.get": + +type RegionSecurityPoliciesGetCall struct { + s *Service + project string + region string + securityPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: List all of the ordered rules present in a single specified +// policy. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - securityPolicy: Name of the security policy to get. 
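The requestId parameter described on these mutating calls gives client-side idempotency: the server ignores a retried request that reuses the same ID. A sketch of that pattern, assuming github.com/google/uuid for ID generation; the retry loop is deliberately naive (no backoff, retries every error) and all names are placeholders:

package example

import (
	"context"

	"github.com/google/uuid"
	compute "google.golang.org/api/compute/v1"
)

// deleteSecurityPolicy issues compute.regionSecurityPolicies.delete with a
// client-generated request ID so a retried call is deduplicated by the server
// rather than producing a second operation.
func deleteSecurityPolicy(ctx context.Context, svc *compute.Service, project, region, policy string) (*compute.Operation, error) {
	reqID := uuid.New().String() // any valid, non-zero UUID

	var op *compute.Operation
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		op, err = svc.RegionSecurityPolicies.Delete(project, region, policy).
			RequestId(reqID). // reuse the same ID on every attempt
			Context(ctx).
			Do()
		if err == nil {
			return op, nil
		}
	}
	return nil, err
}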
+func (r *RegionSecurityPoliciesService) Get(project string, region string, securityPolicy string) *RegionSecurityPoliciesGetCall { + c := &RegionSecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.securityPolicy = securityPolicy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSecurityPoliciesGetCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionSecurityPoliciesGetCall) IfNoneMatch(entityTag string) *RegionSecurityPoliciesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSecurityPoliciesGetCall) Context(ctx context.Context) *RegionSecurityPoliciesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSecurityPoliciesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSecurityPolicies.get" call. +// Exactly one of *SecurityPolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SecurityPolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPolicy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SecurityPolicy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List all of the ordered rules present in a single specified policy.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "httpMethod": "GET", + // "id": "compute.regionSecurityPolicies.get", + // "parameterOrder": [ + // "project", + // "region", + // "securityPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to get.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "response": { + // "$ref": "SecurityPolicy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionSecurityPolicies.insert": + +type RegionSecurityPoliciesInsertCall struct { + s *Service + project string + region string + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new policy in the specified project using the data +// included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionSecurityPoliciesService) Insert(project string, region string, securitypolicy *SecurityPolicy) *RegionSecurityPoliciesInsertCall { + c := &RegionSecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.securitypolicy = securitypolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. 
This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionSecurityPoliciesInsertCall) RequestId(requestId string) *RegionSecurityPoliciesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *RegionSecurityPoliciesInsertCall) ValidateOnly(validateOnly bool) *RegionSecurityPoliciesInsertCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSecurityPoliciesInsertCall) Context(ctx context.Context) *RegionSecurityPoliciesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSecurityPoliciesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSecurityPolicies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new policy in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies", + // "httpMethod": "POST", + // "id": "compute.regionSecurityPolicies.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/securityPolicies", + // "request": { + // "$ref": "SecurityPolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionSecurityPolicies.list": + +type RegionSecurityPoliciesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: List all the policies that have been configured for the +// specified project and region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
+func (r *RegionSecurityPoliciesService) List(project string, region string) *RegionSecurityPoliciesListCall { + c := &RegionSecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionSecurityPoliciesListCall) Filter(filter string) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionSecurityPoliciesListCall) MaxResults(maxResults int64) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. 
By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionSecurityPoliciesListCall) OrderBy(orderBy string) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionSecurityPoliciesListCall) PageToken(pageToken string) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionSecurityPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSecurityPoliciesListCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionSecurityPoliciesListCall) IfNoneMatch(entityTag string) *RegionSecurityPoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSecurityPoliciesListCall) Context(ctx context.Context) *RegionSecurityPoliciesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSecurityPoliciesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSecurityPolicies.list" call. +// Exactly one of *SecurityPolicyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SecurityPolicyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SecurityPolicyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List all the policies that have been configured for the specified project and region.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies", + // "httpMethod": "GET", + // "id": "compute.regionSecurityPolicies.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -146124,9 +153211,9 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/operations", + // "path": "projects/{project}/regions/{region}/securityPolicies", // "response": { - // "$ref": "OperationList" + // "$ref": "SecurityPolicyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -146140,7 +153227,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
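The regionSecurityPolicies.list call added above supports the filter, maxResults, and pageToken parameters described in the hunk, and the generated Pages helper follows nextPageToken automatically. A short sketch of paging with a filter in the form shown by the parameter description; the policy name in the filter is a placeholder:

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listPolicies pages through compute.regionSecurityPolicies.list, printing
// each policy name. Pages stops early if the callback returns an error.
func listPolicies(ctx context.Context, svc *compute.Service, project, region string) error {
	call := svc.RegionSecurityPolicies.List(project, region).
		Filter("name != example-policy").
		MaxResults(100)

	return call.Pages(ctx, func(page *compute.SecurityPolicyList) error {
		for _, p := range page.Items {
			fmt.Println(p.Name)
		}
		return nil
	})
}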
-func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { +func (c *RegionSecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPolicyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -146158,46 +153245,57 @@ func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationL } } -// method id "compute.regionOperations.wait": +// method id "compute.regionSecurityPolicies.patch": -type RegionOperationsWaitCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSecurityPoliciesPatchCall struct { + s *Service + project string + region string + securityPolicy string + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Wait: Waits for the specified Operation resource to return as `DONE` -// or for the request to approach the 2 minute deadline, and retrieves -// the specified Operation resource. This method differs from the `GET` -// method in that it waits for no more than the default deadline (2 -// minutes) and then returns the current state of the operation, which -// might be `DONE` or still in progress. This method is called on a -// best-effort basis. Specifically: - In uncommon cases, when the server -// is overloaded, the request might return before the default deadline -// is reached, or might return after zero seconds. - If the default -// deadline is reached, there is no guarantee that the operation is -// actually done when the method returns. Be prepared to retry if the -// operation is not `DONE`. +// Patch: Patches the specified policy with the data included in the +// request. To clear fields in the rule, leave the fields empty and +// specify them in the updateMask. This cannot be used to be update the +// rules in the policy. Please use the per rule methods like addRule, +// patchRule, and removeRule instead. // -// - operation: Name of the Operations resource to return. // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionOperationsService) Wait(project string, region string, operation string) *RegionOperationsWaitCall { - c := &RegionOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +// - securityPolicy: Name of the security policy to update. +func (r *RegionSecurityPoliciesService) Patch(project string, region string, securityPolicy string, securitypolicy *SecurityPolicy) *RegionSecurityPoliciesPatchCall { + c := &RegionSecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation + c.securityPolicy = securityPolicy + c.securitypolicy = securitypolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. 
This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionSecurityPoliciesPatchCall) RequestId(requestId string) *RegionSecurityPoliciesPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsWaitCall) Fields(s ...googleapi.Field) *RegionOperationsWaitCall { +func (c *RegionSecurityPoliciesPatchCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -146205,21 +153303,21 @@ func (c *RegionOperationsWaitCall) Fields(s ...googleapi.Field) *RegionOperation // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsWaitCall) Context(ctx context.Context) *RegionOperationsWaitCall { +func (c *RegionSecurityPoliciesPatchCall) Context(ctx context.Context) *RegionSecurityPoliciesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionOperationsWaitCall) Header() http.Header { +func (c *RegionSecurityPoliciesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -146227,48 +153325,53 @@ func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/operations/{operation}/wait") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, + "project": c.project, + "region": c.region, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.wait" call. +// Do executes the "compute.regionSecurityPolicies.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -146282,23 +153385,16 @@ func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress. This method is called on a best-effort basis. Specifically: - In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. - If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`. ", - // "flatPath": "projects/{project}/regions/{region}/operations/{operation}/wait", - // "httpMethod": "POST", - // "id": "compute.regionOperations.wait", + // "description": "Patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "httpMethod": "PATCH", + // "id": "compute.regionSecurityPolicies.patch", // "parameterOrder": [ // "project", // "region", - // "operation" + // "securityPolicy" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -146307,48 +153403,62 @@ func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/operations/{operation}/wait", + // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "request": { + // "$ref": "SecurityPolicy" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionSecurityPolicies.delete": +// method id "compute.regionSslCertificates.delete": -type RegionSecurityPoliciesDeleteCall struct { +type RegionSslCertificatesDeleteCall struct { s *Service project string region string - securityPolicy string + sslCertificate string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified policy. +// Delete: Deletes the specified SslCertificate resource in the region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -// - securityPolicy: Name of the security policy to delete. -func (r *RegionSecurityPoliciesService) Delete(project string, region string, securityPolicy string) *RegionSecurityPoliciesDeleteCall { - c := &RegionSecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - sslCertificate: Name of the SslCertificate resource to delete. +func (r *RegionSslCertificatesService) Delete(project string, region string, sslCertificate string) *RegionSslCertificatesDeleteCall { + c := &RegionSslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.securityPolicy = securityPolicy + c.sslCertificate = sslCertificate return c } @@ -146363,7 +153473,7 @@ func (r *RegionSecurityPoliciesService) Delete(project string, region string, se // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionSecurityPoliciesDeleteCall) RequestId(requestId string) *RegionSecurityPoliciesDeleteCall { +func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSslCertificatesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -146371,7 +153481,7 @@ func (c *RegionSecurityPoliciesDeleteCall) RequestId(requestId string) *RegionSe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionSecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesDeleteCall { +func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSslCertificatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -146379,21 +153489,21 @@ func (c *RegionSecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *RegionS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSecurityPoliciesDeleteCall) Context(ctx context.Context) *RegionSecurityPoliciesDeleteCall { +func (c *RegionSslCertificatesDeleteCall) Context(ctx context.Context) *RegionSslCertificatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSecurityPoliciesDeleteCall) Header() http.Header { +func (c *RegionSslCertificatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -146403,7 +153513,7 @@ func (c *RegionSecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -146413,36 +153523,36 @@ func (c *RegionSecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "securityPolicy": c.securityPolicy, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSecurityPolicies.delete" call. +// Do executes the "compute.regionSslCertificates.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -146456,14 +153566,14 @@ func (c *RegionSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Deletes the specified policy.", - // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "description": "Deletes the specified SslCertificate resource in the region.", + // "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", // "httpMethod": "DELETE", - // "id": "compute.regionSecurityPolicies.delete", + // "id": "compute.regionSslCertificates.delete", // "parameterOrder": [ // "project", // "region", - // "securityPolicy" + // "sslCertificate" // ], // "parameters": { // "project": { @@ -146485,15 +153595,15 @@ func (c *RegionSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Op // "location": "query", // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to delete.", + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", // "response": { // "$ref": "Operation" // }, @@ -146505,37 +153615,38 @@ func (c *RegionSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.regionSecurityPolicies.get": +// method id "compute.regionSslCertificates.get": -type RegionSecurityPoliciesGetCall struct { +type RegionSslCertificatesGetCall struct { s *Service project string region string - securityPolicy string + sslCertificate string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: List all of the ordered rules present in a single specified -// policy. +// Get: Returns the specified SslCertificate resource in the specified +// region. Get a list of available SSL certificates by making a list() +// request. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -// - securityPolicy: Name of the security policy to get. -func (r *RegionSecurityPoliciesService) Get(project string, region string, securityPolicy string) *RegionSecurityPoliciesGetCall { - c := &RegionSecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - sslCertificate: Name of the SslCertificate resource to return. +func (r *RegionSslCertificatesService) Get(project string, region string, sslCertificate string) *RegionSslCertificatesGetCall { + c := &RegionSslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.securityPolicy = securityPolicy + c.sslCertificate = sslCertificate return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSecurityPoliciesGetCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesGetCall { +func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCertificatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -146545,7 +153656,7 @@ func (c *RegionSecurityPoliciesGetCall) Fields(s ...googleapi.Field) *RegionSecu // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionSecurityPoliciesGetCall) IfNoneMatch(entityTag string) *RegionSecurityPoliciesGetCall { +func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslCertificatesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -146553,21 +153664,21 @@ func (c *RegionSecurityPoliciesGetCall) IfNoneMatch(entityTag string) *RegionSec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSecurityPoliciesGetCall) Context(ctx context.Context) *RegionSecurityPoliciesGetCall { +func (c *RegionSslCertificatesGetCall) Context(ctx context.Context) *RegionSslCertificatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSecurityPoliciesGetCall) Header() http.Header { +func (c *RegionSslCertificatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -146580,7 +153691,7 @@ func (c *RegionSecurityPoliciesGetCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -146590,38 +153701,38 @@ func (c *RegionSecurityPoliciesGetCall) doRequest(alt string) (*http.Response, e googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "securityPolicy": c.securityPolicy, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSecurityPolicies.get" call. -// Exactly one of *SecurityPolicy or error will be non-nil. Any non-2xx +// Do executes the "compute.regionSslCertificates.get" call. +// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SecurityPolicy.ServerResponse.Header or (if a response was returned +// *SslCertificate.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPolicy, error) { +func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &SecurityPolicy{ + ret := &SslCertificate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -146633,14 +153744,14 @@ func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*Secur } return ret, nil // { - // "description": "List all of the ordered rules present in a single specified policy.", - // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", + // "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", // "httpMethod": "GET", - // "id": "compute.regionSecurityPolicies.get", + // "id": "compute.regionSslCertificates.get", // "parameterOrder": [ // "project", // "region", - // "securityPolicy" + // "sslCertificate" // ], // "parameters": { // "project": { @@ -146657,17 +153768,17 @@ func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*Secur // "required": true, // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to get.", + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", // "response": { - // "$ref": "SecurityPolicy" + // "$ref": "SslCertificate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -146678,28 +153789,28 @@ func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*Secur } -// method id "compute.regionSecurityPolicies.insert": +// method id "compute.regionSslCertificates.insert": -type RegionSecurityPoliciesInsertCall struct { +type RegionSslCertificatesInsertCall struct { s *Service project string region string - securitypolicy *SecurityPolicy + sslcertificate *SslCertificate urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a new policy in the specified project using the data -// included in the request. +// Insert: Creates a SslCertificate resource in the specified project +// and region using the data included in the request // // - project: Project ID for this request. // - region: Name of the region scoping this request. 
-func (r *RegionSecurityPoliciesService) Insert(project string, region string, securitypolicy *SecurityPolicy) *RegionSecurityPoliciesInsertCall { - c := &RegionSecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionSslCertificatesService) Insert(project string, region string, sslcertificate *SslCertificate) *RegionSslCertificatesInsertCall { + c := &RegionSslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.securitypolicy = securitypolicy + c.sslcertificate = sslcertificate return c } @@ -146714,22 +153825,15 @@ func (r *RegionSecurityPoliciesService) Insert(project string, region string, se // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionSecurityPoliciesInsertCall) RequestId(requestId string) *RegionSecurityPoliciesInsertCall { +func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSslCertificatesInsertCall { c.urlParams_.Set("requestId", requestId) return c } -// ValidateOnly sets the optional parameter "validateOnly": If true, the -// request will not be committed. -func (c *RegionSecurityPoliciesInsertCall) ValidateOnly(validateOnly bool) *RegionSecurityPoliciesInsertCall { - c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesInsertCall { +func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSslCertificatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -146737,21 +153841,21 @@ func (c *RegionSecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *RegionS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSecurityPoliciesInsertCall) Context(ctx context.Context) *RegionSecurityPoliciesInsertCall { +func (c *RegionSslCertificatesInsertCall) Context(ctx context.Context) *RegionSslCertificatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionSecurityPoliciesInsertCall) Header() http.Header { +func (c *RegionSslCertificatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -146759,14 +153863,14 @@ func (c *RegionSecurityPoliciesInsertCall) doRequest(alt string) (*http.Response } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -146780,31 +153884,31 @@ func (c *RegionSecurityPoliciesInsertCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSecurityPolicies.insert" call. +// Do executes the "compute.regionSslCertificates.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -146818,10 +153922,10 @@ func (c *RegionSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Creates a new policy in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/securityPolicies", + // "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", + // "flatPath": "projects/{project}/regions/{region}/sslCertificates", // "httpMethod": "POST", - // "id": "compute.regionSecurityPolicies.insert", + // "id": "compute.regionSslCertificates.insert", // "parameterOrder": [ // "project", // "region" @@ -146845,16 +153949,11 @@ func (c *RegionSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Op // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "validateOnly": { - // "description": "If true, the request will not be committed.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/securityPolicies", + // "path": "projects/{project}/regions/{region}/sslCertificates", // "request": { - // "$ref": "SecurityPolicy" + // "$ref": "SslCertificate" // }, // "response": { // "$ref": "Operation" @@ -146867,9 +153966,9 @@ func (c *RegionSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.regionSecurityPolicies.list": +// method id "compute.regionSslCertificates.list": -type RegionSecurityPoliciesListCall struct { +type RegionSslCertificatesListCall struct { s *Service project string region string @@ -146879,13 +153978,13 @@ type RegionSecurityPoliciesListCall struct { header_ http.Header } -// List: List all the policies that have been configured for the -// specified project and region. +// List: Retrieves the list of SslCertificate resources available to the +// specified project in the specified region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. 
-func (r *RegionSecurityPoliciesService) List(project string, region string) *RegionSecurityPoliciesListCall { - c := &RegionSecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionSslCertificatesService) List(project string, region string) *RegionSslCertificatesListCall { + c := &RegionSslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -146926,7 +154025,7 @@ func (r *RegionSecurityPoliciesService) List(project string, region string) *Reg // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne // .*instance`. -func (c *RegionSecurityPoliciesListCall) Filter(filter string) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertificatesListCall { c.urlParams_.Set("filter", filter) return c } @@ -146937,7 +154036,7 @@ func (c *RegionSecurityPoliciesListCall) Filter(filter string) *RegionSecurityPo // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionSecurityPoliciesListCall) MaxResults(maxResults int64) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslCertificatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -146951,7 +154050,7 @@ func (c *RegionSecurityPoliciesListCall) MaxResults(maxResults int64) *RegionSec // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *RegionSecurityPoliciesListCall) OrderBy(orderBy string) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertificatesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -146959,7 +154058,7 @@ func (c *RegionSecurityPoliciesListCall) OrderBy(orderBy string) *RegionSecurity // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionSecurityPoliciesListCall) PageToken(pageToken string) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCertificatesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -146968,7 +154067,7 @@ func (c *RegionSecurityPoliciesListCall) PageToken(pageToken string) *RegionSecu // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionSecurityPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslCertificatesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -146976,7 +154075,7 @@ func (c *RegionSecurityPoliciesListCall) ReturnPartialSuccess(returnPartialSucce // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionSecurityPoliciesListCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslCertificatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -146986,7 +154085,7 @@ func (c *RegionSecurityPoliciesListCall) Fields(s ...googleapi.Field) *RegionSec // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionSecurityPoliciesListCall) IfNoneMatch(entityTag string) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSslCertificatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -146994,21 +154093,21 @@ func (c *RegionSecurityPoliciesListCall) IfNoneMatch(entityTag string) *RegionSe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSecurityPoliciesListCall) Context(ctx context.Context) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) Context(ctx context.Context) *RegionSslCertificatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSecurityPoliciesListCall) Header() http.Header { +func (c *RegionSslCertificatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -147021,7 +154120,7 @@ func (c *RegionSecurityPoliciesListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -147035,33 +154134,33 @@ func (c *RegionSecurityPoliciesListCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSecurityPolicies.list" call. -// Exactly one of *SecurityPolicyList or error will be non-nil. Any +// Do executes the "compute.regionSslCertificates.list" call. +// Exactly one of *SslCertificateList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *SecurityPolicyList.ServerResponse.Header or (if a response was +// *SslCertificateList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyList, error) { +func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &SecurityPolicyList{ + ret := &SslCertificateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -147073,10 +154172,10 @@ func (c *RegionSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*Secu } return ret, nil // { - // "description": "List all the policies that have been configured for the specified project and region.", - // "flatPath": "projects/{project}/regions/{region}/securityPolicies", + // "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", + // "flatPath": "projects/{project}/regions/{region}/sslCertificates", // "httpMethod": "GET", - // "id": "compute.regionSecurityPolicies.list", + // "id": "compute.regionSslCertificates.list", // "parameterOrder": [ // "project", // "region" @@ -147125,9 +154224,9 @@ func (c *RegionSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*Secu // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/securityPolicies", + // "path": "projects/{project}/regions/{region}/sslCertificates", // "response": { - // "$ref": "SecurityPolicyList" + // "$ref": "SslCertificateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -147141,7 +154240,7 @@ func (c *RegionSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*Secu // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionSecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPolicyList) error) error { +func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -147159,217 +154258,31 @@ func (c *RegionSecurityPoliciesListCall) Pages(ctx context.Context, f func(*Secu } } -// method id "compute.regionSecurityPolicies.patch": - -type RegionSecurityPoliciesPatchCall struct { - s *Service - project string - region string - securityPolicy string - securitypolicy *SecurityPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches the specified policy with the data included in the -// request. -// -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - securityPolicy: Name of the security policy to update. -func (r *RegionSecurityPoliciesService) Patch(project string, region string, securityPolicy string, securitypolicy *SecurityPolicy) *RegionSecurityPoliciesPatchCall { - c := &RegionSecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.securityPolicy = securityPolicy - c.securitypolicy = securitypolicy - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionSecurityPoliciesPatchCall) RequestId(requestId string) *RegionSecurityPoliciesPatchCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionSecurityPoliciesPatchCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionSecurityPoliciesPatchCall) Context(ctx context.Context) *RegionSecurityPoliciesPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionSecurityPoliciesPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionSecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "securityPolicy": c.securityPolicy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionSecurityPolicies.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionSecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches the specified policy with the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", - // "httpMethod": "PATCH", - // "id": "compute.regionSecurityPolicies.patch", - // "parameterOrder": [ - // "project", - // "region", - // "securityPolicy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "securityPolicy": { - // "description": "Name of the security policy to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", - // "request": { - // "$ref": "SecurityPolicy" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionSslCertificates.delete": +// method id "compute.regionSslPolicies.delete": -type RegionSslCertificatesDeleteCall struct { - s *Service - project string - region string - sslCertificate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSslPoliciesDeleteCall struct { + s *Service + project string + region string + sslPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified SslCertificate resource in the region. +// Delete: Deletes the specified SSL policy. 
The SSL policy resource can +// be deleted only if it is not in use by any TargetHttpsProxy or +// TargetSslProxy resources. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - sslCertificate: Name of the SslCertificate resource to delete. -func (r *RegionSslCertificatesService) Delete(project string, region string, sslCertificate string) *RegionSslCertificatesDeleteCall { - c := &RegionSslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - sslPolicy: Name of the SSL policy to delete. The name must be 1-63 +// characters long, and comply with RFC1035. +func (r *RegionSslPoliciesService) Delete(project string, region string, sslPolicy string) *RegionSslPoliciesDeleteCall { + c := &RegionSslPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.sslCertificate = sslCertificate + c.sslPolicy = sslPolicy return c } @@ -147384,7 +154297,7 @@ func (r *RegionSslCertificatesService) Delete(project string, region string, ssl // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSslCertificatesDeleteCall { +func (c *RegionSslPoliciesDeleteCall) RequestId(requestId string) *RegionSslPoliciesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -147392,7 +154305,7 @@ func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSsl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSslCertificatesDeleteCall { +func (c *RegionSslPoliciesDeleteCall) Fields(s ...googleapi.Field) *RegionSslPoliciesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -147400,21 +154313,21 @@ func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSs // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSslCertificatesDeleteCall) Context(ctx context.Context) *RegionSslCertificatesDeleteCall { +func (c *RegionSslPoliciesDeleteCall) Context(ctx context.Context) *RegionSslPoliciesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionSslCertificatesDeleteCall) Header() http.Header { +func (c *RegionSslPoliciesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -147424,7 +154337,7 @@ func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -147432,38 +154345,38 @@ func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "sslCertificate": c.sslCertificate, + "project": c.project, + "region": c.region, + "sslPolicy": c.sslPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.delete" call. +// Do executes the "compute.regionSslPolicies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -147477,14 +154390,14 @@ func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified SslCertificate resource in the region.", - // "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "description": "Deletes the specified SSL policy. 
The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", // "httpMethod": "DELETE", - // "id": "compute.regionSslCertificates.delete", + // "id": "compute.regionSslPolicies.delete", // "parameterOrder": [ // "project", // "region", - // "sslCertificate" + // "sslPolicy" // ], // "parameters": { // "project": { @@ -147506,15 +154419,14 @@ func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "location": "query", // "type": "string" // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to delete.", + // "sslPolicy": { + // "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", // "response": { // "$ref": "Operation" // }, @@ -147526,38 +154438,38 @@ func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionSslCertificates.get": +// method id "compute.regionSslPolicies.get": -type RegionSslCertificatesGetCall struct { - s *Service - project string - region string - sslCertificate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionSslPoliciesGetCall struct { + s *Service + project string + region string + sslPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified SslCertificate resource in the specified -// region. Get a list of available SSL certificates by making a list() -// request. +// Get: Lists all of the ordered rules present in a single specified +// policy. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - sslCertificate: Name of the SslCertificate resource to return. -func (r *RegionSslCertificatesService) Get(project string, region string, sslCertificate string) *RegionSslCertificatesGetCall { - c := &RegionSslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. +func (r *RegionSslPoliciesService) Get(project string, region string, sslPolicy string) *RegionSslPoliciesGetCall { + c := &RegionSslPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.sslCertificate = sslCertificate + c.sslPolicy = sslPolicy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCertificatesGetCall { +func (c *RegionSslPoliciesGetCall) Fields(s ...googleapi.Field) *RegionSslPoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -147567,7 +154479,7 @@ func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslCertificatesGetCall { +func (c *RegionSslPoliciesGetCall) IfNoneMatch(entityTag string) *RegionSslPoliciesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -147575,21 +154487,21 @@ func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSslCertificatesGetCall) Context(ctx context.Context) *RegionSslCertificatesGetCall { +func (c *RegionSslPoliciesGetCall) Context(ctx context.Context) *RegionSslPoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesGetCall) Header() http.Header { +func (c *RegionSslPoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -147602,7 +154514,7 @@ func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -147610,40 +154522,40 @@ func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "sslCertificate": c.sslCertificate, + "project": c.project, + "region": c.region, + "sslPolicy": c.sslPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.get" call. -// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx +// Do executes the "compute.regionSslPolicies.get" call. +// Exactly one of *SslPolicy or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SslCertificate.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { +// *SslPolicy.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &SslCertificate{ + ret := &SslPolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -147655,14 +154567,14 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer } return ret, nil // { - // "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", - // "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "description": "Lists all of the ordered rules present in a single specified policy.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", // "httpMethod": "GET", - // "id": "compute.regionSslCertificates.get", + // "id": "compute.regionSslPolicies.get", // "parameterOrder": [ // "project", // "region", - // "sslCertificate" + // "sslPolicy" // ], // "parameters": { // "project": { @@ -147679,17 +154591,16 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer // "required": true, // "type": "string" // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to return.", + // "sslPolicy": { + // "description": "Name of the SSL policy to update. 
The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", // "response": { - // "$ref": "SslCertificate" + // "$ref": "SslPolicy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -147700,28 +154611,28 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer } -// method id "compute.regionSslCertificates.insert": +// method id "compute.regionSslPolicies.insert": -type RegionSslCertificatesInsertCall struct { - s *Service - project string - region string - sslcertificate *SslCertificate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSslPoliciesInsertCall struct { + s *Service + project string + region string + sslpolicy *SslPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a SslCertificate resource in the specified project -// and region using the data included in the request +// Insert: Creates a new policy in the specified project and region +// using the data included in the request. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionSslCertificatesService) Insert(project string, region string, sslcertificate *SslCertificate) *RegionSslCertificatesInsertCall { - c := &RegionSslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionSslPoliciesService) Insert(project string, region string, sslpolicy *SslPolicy) *RegionSslPoliciesInsertCall { + c := &RegionSslPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.sslcertificate = sslcertificate + c.sslpolicy = sslpolicy return c } @@ -147736,7 +154647,7 @@ func (r *RegionSslCertificatesService) Insert(project string, region string, ssl // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSslCertificatesInsertCall { +func (c *RegionSslPoliciesInsertCall) RequestId(requestId string) *RegionSslPoliciesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -147744,7 +154655,7 @@ func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSsl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSslCertificatesInsertCall { +func (c *RegionSslPoliciesInsertCall) Fields(s ...googleapi.Field) *RegionSslPoliciesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -147752,21 +154663,21 @@ func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSs // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionSslCertificatesInsertCall) Context(ctx context.Context) *RegionSslCertificatesInsertCall { +func (c *RegionSslPoliciesInsertCall) Context(ctx context.Context) *RegionSslPoliciesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesInsertCall) Header() http.Header { +func (c *RegionSslPoliciesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -147774,14 +154685,14 @@ func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -147795,31 +154706,31 @@ func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.insert" call. +// Do executes the "compute.regionSslPolicies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -147833,10 +154744,10 @@ func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", - // "flatPath": "projects/{project}/regions/{region}/sslCertificates", + // "description": "Creates a new policy in the specified project and region using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies", // "httpMethod": "POST", - // "id": "compute.regionSslCertificates.insert", + // "id": "compute.regionSslPolicies.insert", // "parameterOrder": [ // "project", // "region" @@ -147862,9 +154773,9 @@ func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/sslCertificates", + // "path": "projects/{project}/regions/{region}/sslPolicies", // "request": { - // "$ref": "SslCertificate" + // "$ref": "SslPolicy" // }, // "response": { // "$ref": "Operation" @@ -147877,9 +154788,9 @@ func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionSslCertificates.list": +// method id "compute.regionSslPolicies.list": -type RegionSslCertificatesListCall struct { +type RegionSslPoliciesListCall struct { s *Service project string region string @@ -147889,13 +154800,13 @@ type RegionSslCertificatesListCall struct { header_ http.Header } -// List: Retrieves the list of SslCertificate resources available to the -// specified project in the specified region. +// List: Lists all the SSL policies that have been configured for the +// specified project and region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionSslCertificatesService) List(project string, region string) *RegionSslCertificatesListCall { - c := &RegionSslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionSslPoliciesService) List(project string, region string) *RegionSslPoliciesListCall { + c := &RegionSslPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -147936,7 +154847,7 @@ func (r *RegionSslCertificatesService) List(project string, region string) *Regi // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne // .*instance`. -func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) Filter(filter string) *RegionSslPoliciesListCall { c.urlParams_.Set("filter", filter) return c } @@ -147947,7 +154858,7 @@ func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertific // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) MaxResults(maxResults int64) *RegionSslPoliciesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -147961,7 +154872,7 @@ func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslC // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) OrderBy(orderBy string) *RegionSslPoliciesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -147969,7 +154880,7 @@ func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertif // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) PageToken(pageToken string) *RegionSslPoliciesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -147978,7 +154889,7 @@ func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCe // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionSslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslPoliciesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -147986,7 +154897,7 @@ func (c *RegionSslCertificatesListCall) ReturnPartialSuccess(returnPartialSucces // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) Fields(s ...googleapi.Field) *RegionSslPoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -147996,7 +154907,7 @@ func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) IfNoneMatch(entityTag string) *RegionSslPoliciesListCall { c.ifNoneMatch_ = entityTag return c } @@ -148004,21 +154915,21 @@ func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSsl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionSslCertificatesListCall) Context(ctx context.Context) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) Context(ctx context.Context) *RegionSslPoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesListCall) Header() http.Header { +func (c *RegionSslPoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -148031,7 +154942,7 @@ func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -148045,33 +154956,33 @@ func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.list" call. -// Exactly one of *SslCertificateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SslCertificateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionSslPolicies.list" call. +// Exactly one of *SslPoliciesList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SslPoliciesList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { +func (c *RegionSslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &SslCertificateList{ + ret := &SslPoliciesList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -148083,10 +154994,10 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe } return ret, nil // { - // "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", - // "flatPath": "projects/{project}/regions/{region}/sslCertificates", + // "description": "Lists all the SSL policies that have been configured for the specified project and region.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies", // "httpMethod": "GET", - // "id": "compute.regionSslCertificates.list", + // "id": "compute.regionSslPolicies.list", // "parameterOrder": [ // "project", // "region" @@ -148135,9 +155046,9 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/sslCertificates", + // "path": "projects/{project}/regions/{region}/sslPolicies", // "response": { - // "$ref": "SslCertificateList" + // "$ref": "SslPoliciesList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -148151,7 +155062,7 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { +func (c *RegionSslPoliciesListCall) Pages(ctx context.Context, f func(*SslPoliciesList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -148169,6 +155080,468 @@ func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCe } } +// method id "compute.regionSslPolicies.listAvailableFeatures": + +type RegionSslPoliciesListAvailableFeaturesCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListAvailableFeatures: Lists all features that can be specified in +// the SSL policy when using custom profile. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionSslPoliciesService) ListAvailableFeatures(project string, region string) *RegionSslPoliciesListAvailableFeaturesCall { + c := &RegionSslPoliciesListAvailableFeaturesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. 
If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionSslPoliciesListAvailableFeaturesCall) Filter(filter string) *RegionSslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionSslPoliciesListAvailableFeaturesCall) MaxResults(maxResults int64) *RegionSslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. 
+func (c *RegionSslPoliciesListAvailableFeaturesCall) OrderBy(orderBy string) *RegionSslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionSslPoliciesListAvailableFeaturesCall) PageToken(pageToken string) *RegionSslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionSslPoliciesListAvailableFeaturesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSslPoliciesListAvailableFeaturesCall) Fields(s ...googleapi.Field) *RegionSslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionSslPoliciesListAvailableFeaturesCall) IfNoneMatch(entityTag string) *RegionSslPoliciesListAvailableFeaturesCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSslPoliciesListAvailableFeaturesCall) Context(ctx context.Context) *RegionSslPoliciesListAvailableFeaturesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSslPoliciesListAvailableFeaturesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSslPolicies.listAvailableFeatures" call. 
+// Exactly one of *SslPoliciesListAvailableFeaturesResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *SslPoliciesListAvailableFeaturesResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionSslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) (*SslPoliciesListAvailableFeaturesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SslPoliciesListAvailableFeaturesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures", + // "httpMethod": "GET", + // "id": "compute.regionSslPolicies.listAvailableFeatures", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures", + // "response": { + // "$ref": "SslPoliciesListAvailableFeaturesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionSslPolicies.patch": + +type RegionSslPoliciesPatchCall struct { + s *Service + project string + region string + sslPolicy string + sslpolicy *SslPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified SSL policy with the data included in the +// request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. +func (r *RegionSslPoliciesService) Patch(project string, region string, sslPolicy string, sslpolicy *SslPolicy) *RegionSslPoliciesPatchCall { + c := &RegionSslPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.sslPolicy = sslPolicy + c.sslpolicy = sslpolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionSslPoliciesPatchCall) RequestId(requestId string) *RegionSslPoliciesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSslPoliciesPatchCall) Fields(s ...googleapi.Field) *RegionSslPoliciesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSslPoliciesPatchCall) Context(ctx context.Context) *RegionSslPoliciesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionSslPoliciesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "sslPolicy": c.sslPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSslPolicies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified SSL policy with the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", + // "httpMethod": "PATCH", + // "id": "compute.regionSslPolicies.patch", + // "parameterOrder": [ + // "project", + // "region", + // "sslPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sslPolicy": { + // "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", + // "request": { + // "$ref": "SslPolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionTargetHttpProxies.delete": type RegionTargetHttpProxiesDeleteCall struct { @@ -148274,17 +155647,17 @@ func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -148452,17 +155825,17 @@ func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*Targ if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpProxy{ ServerResponse: googleapi.ServerResponse{ @@ -148630,17 +156003,17 @@ func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -148880,17 +156253,17 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -149102,17 +156475,17 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: 
res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -149283,17 +156656,17 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -149461,17 +156834,17 @@ func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*Tar if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpsProxy{ ServerResponse: googleapi.ServerResponse{ @@ -149639,17 +157012,17 @@ func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -149889,17 +157262,17 @@ func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*Ta if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpsProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -150113,17 +157486,17 @@ func (c *RegionTargetHttpsProxiesPatchCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -150204,10 +157577,10 @@ type RegionTargetHttpsProxiesSetSslCertificatesCall struct { // SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an -// SslCertificates resource for. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an +// SslCertificates resource for. 
func (r *RegionTargetHttpsProxiesService) SetSslCertificates(project string, region string, targetHttpsProxy string, regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest) *RegionTargetHttpsProxiesSetSslCertificatesCall { c := &RegionTargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -150302,17 +157675,17 @@ func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.Ca if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -150355,18 +157728,385 @@ func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.Ca // "location": "query", // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + // "request": { + // "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpsProxies.setUrlMap": + +type RegionTargetHttpsProxiesSetUrlMapCall struct { + s *Service + project string + region string + targetHttpsProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetUrlMap: Changes the URL map for TargetHttpsProxy. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy to set a URL map +// for. +func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region string, targetHttpsProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpsProxiesSetUrlMapCall { + c := &RegionTargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpsProxy = targetHttpsProxy + c.urlmapreference = urlmapreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. 
The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetUrlMapCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetUrlMapCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetUrlMapCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpsProxies.setUrlMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the URL map for TargetHttpsProxy.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpsProxies.setUrlMap", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy to set a URL map for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetTcpProxies.delete": + +type RegionTargetTcpProxiesDeleteCall struct { + s *Service + project string + region string + targetTcpProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified TargetTcpProxy resource. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetTcpProxy: Name of the TargetTcpProxy resource to delete. 
+func (r *RegionTargetTcpProxiesService) Delete(project string, region string, targetTcpProxy string) *RegionTargetTcpProxiesDeleteCall { + c := &RegionTargetTcpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetTcpProxy = targetTcpProxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionTargetTcpProxiesDeleteCall) RequestId(requestId string) *RegionTargetTcpProxiesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetTcpProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetTcpProxiesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetTcpProxiesDeleteCall) Context(ctx context.Context) *RegionTargetTcpProxiesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetTcpProxiesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetTcpProxy": c.targetTcpProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetTcpProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
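The new regional TargetTcpProxy calls follow the same Operation-returning pattern as the other regional proxy services, with RequestId providing a retry-safe idempotency key. A hypothetical sketch of creating and then deleting one, assuming the regenerated Service struct exposes a RegionTargetTcpProxies field and that TargetTcpProxy's Name and Service (backend-service URL) fields behave as elsewhere in the surface; all names are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/uuid"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	proxy := &compute.TargetTcpProxy{
		Name:    "example-tcp-proxy",
		Service: "projects/my-project/regions/us-central1/backendServices/example-bes",
	}
	// Insert returns an Operation; callers normally poll it before relying on the proxy.
	op, err := svc.RegionTargetTcpProxies.
		Insert("my-project", "us-central1", proxy).
		RequestId(uuid.NewString()). // safe to resend with the same ID on retry
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("insert operation:", op.Name)

	// Delete uses the same builder pattern; a fresh request ID keeps its retry idempotent.
	if _, err := svc.RegionTargetTcpProxies.
		Delete("my-project", "us-central1", "example-tcp-proxy").
		RequestId(uuid.NewString()).
		Context(ctx).
		Do(); err != nil {
		log.Fatal(err)
	}
}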
+func (c *RegionTargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetTcpProxy resource.", + // "flatPath": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + // "httpMethod": "DELETE", + // "id": "compute.regionTargetTcpProxies.delete", + // "parameterOrder": [ + // "project", + // "region", + // "targetTcpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetTcpProxy": { + // "description": "Name of the TargetTcpProxy resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", - // "request": { - // "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" - // }, + // "path": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", // "response": { // "$ref": "Operation" // }, @@ -150378,31 +158118,200 @@ func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.Ca } -// method id "compute.regionTargetHttpsProxies.setUrlMap": +// method id "compute.regionTargetTcpProxies.get": -type RegionTargetHttpsProxiesSetUrlMapCall struct { - s *Service - project string - region string - targetHttpsProxy string - urlmapreference *UrlMapReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetTcpProxiesGetCall struct { + s *Service + project string + region string + targetTcpProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpsProxy. +// Get: Returns the specified TargetTcpProxy resource. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy to set a URL map -// for. -func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region string, targetHttpsProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpsProxiesSetUrlMapCall { - c := &RegionTargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - targetTcpProxy: Name of the TargetTcpProxy resource to return. +func (r *RegionTargetTcpProxiesService) Get(project string, region string, targetTcpProxy string) *RegionTargetTcpProxiesGetCall { + c := &RegionTargetTcpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targetHttpsProxy = targetHttpsProxy - c.urlmapreference = urlmapreference + c.targetTcpProxy = targetTcpProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetTcpProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetTcpProxiesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionTargetTcpProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetTcpProxiesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *RegionTargetTcpProxiesGetCall) Context(ctx context.Context) *RegionTargetTcpProxiesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetTcpProxiesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetTcpProxy": c.targetTcpProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetTcpProxies.get" call. +// Exactly one of *TargetTcpProxy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetTcpProxy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionTargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TargetTcpProxy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified TargetTcpProxy resource.", + // "flatPath": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + // "httpMethod": "GET", + // "id": "compute.regionTargetTcpProxies.get", + // "parameterOrder": [ + // "project", + // "region", + // "targetTcpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetTcpProxy": { + // "description": "Name of the TargetTcpProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + // "response": { + // "$ref": "TargetTcpProxy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionTargetTcpProxies.insert": + +type RegionTargetTcpProxiesInsertCall struct { + s *Service + project string + region string + targettcpproxy *TargetTcpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a TargetTcpProxy resource in the specified project +// and region using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionTargetTcpProxiesService) Insert(project string, region string, targettcpproxy *TargetTcpProxy) *RegionTargetTcpProxiesInsertCall { + c := &RegionTargetTcpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targettcpproxy = targettcpproxy return c } @@ -150417,7 +158326,7 @@ func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region strin // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *RegionTargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetUrlMapCall { +func (c *RegionTargetTcpProxiesInsertCall) RequestId(requestId string) *RegionTargetTcpProxiesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -150425,7 +158334,7 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetUrlMapCall { +func (c *RegionTargetTcpProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetTcpProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -150433,21 +158342,21 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetUrlMapCall { +func (c *RegionTargetTcpProxiesInsertCall) Context(ctx context.Context) *RegionTargetTcpProxiesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { +func (c *RegionTargetTcpProxiesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -150455,14 +158364,14 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targettcpproxy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetTcpProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -150470,38 +158379,37 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpsProxies.setUrlMap" call. +// Do executes the "compute.regionTargetTcpProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -150515,14 +158423,13 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the URL map for TargetHttpsProxy.", - // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "description": "Creates a TargetTcpProxy resource in the specified project and region using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/targetTcpProxies", // "httpMethod": "POST", - // "id": "compute.regionTargetHttpsProxies.setUrlMap", + // "id": "compute.regionTargetTcpProxies.insert", // "parameterOrder": [ // "project", - // "region", - // "targetHttpsProxy" + // "region" // ], // "parameters": { // "project": { @@ -150543,18 +158450,11 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy to set a URL map for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "path": "projects/{project}/regions/{region}/targetTcpProxies", // "request": { - // "$ref": "UrlMapReference" + // "$ref": "TargetTcpProxy" // }, // "response": { // "$ref": "Operation" @@ -150567,6 +158467,298 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) } +// method id "compute.regionTargetTcpProxies.list": + +type RegionTargetTcpProxiesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of TargetTcpProxy resources available to the +// specified project in a given region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionTargetTcpProxiesService) List(project string, region string) *RegionTargetTcpProxiesListCall { + c := &RegionTargetTcpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionTargetTcpProxiesListCall) Filter(filter string) *RegionTargetTcpProxiesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionTargetTcpProxiesListCall) MaxResults(maxResults int64) *RegionTargetTcpProxiesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionTargetTcpProxiesListCall) OrderBy(orderBy string) *RegionTargetTcpProxiesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionTargetTcpProxiesListCall) PageToken(pageToken string) *RegionTargetTcpProxiesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionTargetTcpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetTcpProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetTcpProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetTcpProxiesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
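A similarly hedged sketch of the new List call, combining the optional Filter and MaxResults parameters documented above with the Pages helper that this hunk adds a little further down. The filter expression is the example from the doc comment; the Items field on TargetTcpProxyList is assumed from the client's usual list-type shape.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listRegionTcpProxies prints the TargetTcpProxy names in one region.
// Sketch only: svc is a configured *compute.Service as in the previous example.
func listRegionTcpProxies(ctx context.Context, svc *compute.Service) error {
	call := svc.RegionTargetTcpProxies.List("my-project", "us-central1").
		Filter(`name != example-instance`). // example expression from the Filter docs
		MaxResults(100)

	// Pages drives the pageToken/nextPageToken loop; returning a non-nil
	// error from the callback stops the iteration early.
	return call.Pages(ctx, func(page *compute.TargetTcpProxyList) error {
		for _, p := range page.Items {
			fmt.Println(p.Name)
		}
		return nil
	})
}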
+func (c *RegionTargetTcpProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetTcpProxiesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetTcpProxiesListCall) Context(ctx context.Context) *RegionTargetTcpProxiesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetTcpProxiesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetTcpProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetTcpProxies.list" call. +// Exactly one of *TargetTcpProxyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetTcpProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionTargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TargetTcpProxyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of TargetTcpProxy resources available to the specified project in a given region.", + // "flatPath": "projects/{project}/regions/{region}/targetTcpProxies", + // "httpMethod": "GET", + // "id": "compute.regionTargetTcpProxies.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetTcpProxies", + // "response": { + // "$ref": "TargetTcpProxyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionTargetTcpProxiesListCall) Pages(ctx context.Context, f func(*TargetTcpProxyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.regionUrlMaps.delete": type RegionUrlMapsDeleteCall struct { @@ -150663,17 +158855,17 @@ func (c *RegionUrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -150840,17 +159032,17 @@ func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMap{ ServerResponse: googleapi.ServerResponse{ @@ -151009,17 +159201,17 @@ func (c *RegionUrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -151259,17 +159451,17 @@ func (c *RegionUrlMapsListCall) Do(opts 
...googleapi.CallOption) (*UrlMapList, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapList{ ServerResponse: googleapi.ServerResponse{ @@ -151474,17 +159666,17 @@ func (c *RegionUrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -151654,17 +159846,17 @@ func (c *RegionUrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -151828,17 +160020,17 @@ func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsVa if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapsValidateResponse{ ServerResponse: googleapi.ServerResponse{ @@ -152006,17 +160198,17 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Region{ ServerResponse: googleapi.ServerResponse{ @@ -152252,17 +160444,17 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionList{ ServerResponse: googleapi.ServerResponse{ @@ -152544,17 +160736,17 @@ func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Rese if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, 
gensupport.WrapError(err) } ret := &ReservationAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -152756,17 +160948,17 @@ func (c *ReservationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -152932,17 +161124,17 @@ func (c *ReservationsGetCall) Do(opts ...googleapi.CallOption) (*Reservation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Reservation{ ServerResponse: googleapi.ServerResponse{ @@ -153112,17 +161304,17 @@ func (c *ReservationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -153296,17 +161488,17 @@ func (c *ReservationsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -153546,17 +161738,17 @@ func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationLis if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ReservationList{ ServerResponse: googleapi.ServerResponse{ @@ -153770,17 +161962,17 @@ func (c *ReservationsResizeCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -153943,17 +162135,17 @@ func (c *ReservationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } 
if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -154111,17 +162303,17 @@ func (c *ReservationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -154308,17 +162500,17 @@ func (c *ReservationsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -154585,17 +162777,17 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourcePolicyAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -154797,17 +162989,17 @@ func (c *ResourcePoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -154973,17 +163165,17 @@ func (c *ResourcePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResourcePol if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourcePolicy{ ServerResponse: googleapi.ServerResponse{ @@ -155153,17 +163345,17 @@ func (c *ResourcePoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -155336,17 +163528,17 @@ func (c *ResourcePoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil 
{ res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -155586,17 +163778,17 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourcePolicyList{ ServerResponse: googleapi.ServerResponse{ @@ -155793,17 +163985,17 @@ func (c *ResourcePoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -155961,17 +164153,17 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -156223,17 +164415,17 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RouterAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -156435,17 +164627,17 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -156612,17 +164804,17 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Router{ ServerResponse: 
googleapi.ServerResponse{ @@ -156697,10 +164889,10 @@ type RoutersGetNatMappingInfoCall struct { // GetNatMappingInfo: Retrieves runtime Nat mapping information of VM // endpoints. // -// - project: Project ID for this request. -// - region: Name of the region for this request. -// - router: Name of the Router resource to query for Nat Mapping -// information of VM endpoints. +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - router: Name of the Router resource to query for Nat Mapping +// information of VM endpoints. func (r *RoutersService) GetNatMappingInfo(project string, region string, router string) *RoutersGetNatMappingInfoCall { c := &RoutersGetNatMappingInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -156868,17 +165060,17 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VmEndpointNatMappingsList{ ServerResponse: googleapi.ServerResponse{ @@ -157090,17 +165282,17 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RouterStatusResponse{ ServerResponse: googleapi.ServerResponse{ @@ -157268,17 +165460,17 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -157518,17 +165710,17 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RouterList{ ServerResponse: googleapi.ServerResponse{ @@ -157742,17 +165934,17 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -157916,17 +166108,17 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) 
(*RoutersPreviewRe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RoutersPreviewResponse{ ServerResponse: googleapi.ServerResponse{ @@ -158104,17 +166296,17 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -158281,17 +166473,17 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -158446,17 +166638,17 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Route{ ServerResponse: googleapi.ServerResponse{ @@ -158612,17 +166804,17 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -158850,17 +167042,17 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RouteList{ ServerResponse: googleapi.ServerResponse{ @@ -159051,17 +167243,17 @@ func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ 
ServerResponse: googleapi.ServerResponse{ @@ -159310,17 +167502,17 @@ func (c *SecurityPoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPoliciesAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -159518,17 +167710,17 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -159683,17 +167875,17 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -159758,9 +167950,9 @@ type SecurityPoliciesGetRuleCall struct { // GetRule: Gets a rule at the specified priority. // -// - project: Project ID for this request. -// - securityPolicy: Name of the security policy to which the queried -// rule belongs. +// - project: Project ID for this request. +// - securityPolicy: Name of the security policy to which the queried +// rule belongs. 
func (r *SecurityPoliciesService) GetRule(project string, securityPolicy string) *SecurityPoliciesGetRuleCall { c := &SecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -159851,17 +168043,17 @@ func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Securit if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPolicyRule{ ServerResponse: googleapi.ServerResponse{ @@ -160030,17 +168222,17 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -160273,17 +168465,17 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPolicyList{ ServerResponse: googleapi.ServerResponse{ @@ -160556,17 +168748,17 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googlea if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPoliciesListPreconfiguredExpressionSetsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -160650,9 +168842,10 @@ type SecurityPoliciesPatchCall struct { } // Patch: Patches the specified policy with the data included in the -// request. This cannot be used to be update the rules in the policy. -// Please use the per rule methods like addRule, patchRule, and -// removeRule instead. +// request. To clear fields in the rule, leave the fields empty and +// specify them in the updateMask. This cannot be used to be update the +// rules in the policy. Please use the per rule methods like addRule, +// patchRule, and removeRule instead. // // - project: Project ID for this request. // - securityPolicy: Name of the security policy to update. 
@@ -160748,17 +168941,17 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -160772,7 +168965,7 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Patches the specified policy with the data included in the request. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", + // "description": "Patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", // "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}", // "httpMethod": "PATCH", // "id": "compute.securityPolicies.patch", @@ -160922,17 +169115,17 @@ func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -161088,17 +169281,17 @@ func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -161154,6 +169347,162 @@ func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper } +// method id "compute.securityPolicies.setLabels": + +type SecurityPoliciesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a security policy. To learn more about +// labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *SecurityPoliciesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *SecurityPoliciesSetLabelsCall { + c := &SecurityPoliciesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
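Finally, a hedged sketch of the new securityPolicies.setLabels call. The GlobalSetLabelsRequest field names (Labels, LabelFingerprint) and the need to pass the policy's current fingerprint follow the generated client's usual labeling shape and are assumptions, not taken from this hunk; the project and policy names are placeholders.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// labelSecurityPolicy attaches labels to a global security policy.
// Sketch only: svc is a configured *compute.Service.
func labelSecurityPolicy(ctx context.Context, svc *compute.Service) error {
	req := &compute.GlobalSetLabelsRequest{
		Labels:           map[string]string{"team": "networking", "env": "prod"},
		LabelFingerprint: "<current-fingerprint>", // assumed: must match the policy's current label fingerprint
	}
	op, err := svc.SecurityPolicies.SetLabels("my-project", "example-policy", req).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("setLabels operation:", op.Name)
	return nil
}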
+func (c *SecurityPoliciesSetLabelsCall) Fields(s ...googleapi.Field) *SecurityPoliciesSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesSetLabelsCall) Context(ctx context.Context) *SecurityPoliciesSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SecurityPoliciesSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/securityPolicies/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a security policy. 
To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/global/securityPolicies/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.setLabels", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/securityPolicies/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.serviceAttachments.aggregatedList": type ServiceAttachmentsAggregatedListCall struct { @@ -161345,17 +169694,17 @@ func (c *ServiceAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -161466,10 +169815,10 @@ type ServiceAttachmentsDeleteCall struct { // Delete: Deletes the specified ServiceAttachment in the given scope // -// - project: Project ID for this request. -// - region: Name of the region of this request. -// - serviceAttachment: Name of the ServiceAttachment resource to -// delete. +// - project: Project ID for this request. +// - region: Name of the region of this request. +// - serviceAttachment: Name of the ServiceAttachment resource to +// delete. func (r *ServiceAttachmentsService) Delete(project string, region string, serviceAttachment string) *ServiceAttachmentsDeleteCall { c := &ServiceAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -161558,17 +169907,17 @@ func (c *ServiceAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -161647,10 +169996,10 @@ type ServiceAttachmentsGetCall struct { // Get: Returns the specified ServiceAttachment resource in the given // scope. // -// - project: Project ID for this request. -// - region: Name of the region of this request. -// - serviceAttachment: Name of the ServiceAttachment resource to -// return. +// - project: Project ID for this request. +// - region: Name of the region of this request. 
+// - serviceAttachment: Name of the ServiceAttachment resource to +// return. func (r *ServiceAttachmentsService) Get(project string, region string, serviceAttachment string) *ServiceAttachmentsGetCall { c := &ServiceAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -161736,17 +170085,17 @@ func (c *ServiceAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*ServiceAt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceAttachment{ ServerResponse: googleapi.ServerResponse{ @@ -161916,17 +170265,17 @@ func (c *ServiceAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -162100,17 +170449,17 @@ func (c *ServiceAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -162349,17 +170698,17 @@ func (c *ServiceAttachmentsListCall) Do(opts ...googleapi.CallOption) (*ServiceA if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceAttachmentList{ ServerResponse: googleapi.ServerResponse{ @@ -162476,12 +170825,12 @@ type ServiceAttachmentsPatchCall struct { // included in the request. This method supports PATCH semantics and // uses JSON merge patch format and processing rules. // -// - project: Project ID for this request. -// - region: The region scoping this request and should conform to -// RFC1035. -// - serviceAttachment: The resource id of the ServiceAttachment to -// patch. It should conform to RFC1035 resource name or be a string -// form on an unsigned long number. +// - project: Project ID for this request. +// - region: The region scoping this request and should conform to +// RFC1035. +// - serviceAttachment: The resource id of the ServiceAttachment to +// patch. It should conform to RFC1035 resource name or be a string +// form on an unsigned long number. 
func (r *ServiceAttachmentsService) Patch(project string, region string, serviceAttachment string, serviceattachment *ServiceAttachment) *ServiceAttachmentsPatchCall { c := &ServiceAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -162576,17 +170925,17 @@ func (c *ServiceAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -162747,17 +171096,17 @@ func (c *ServiceAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -162915,17 +171264,17 @@ func (c *ServiceAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -163093,17 +171442,17 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -163258,17 +171607,17 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Snapshot{ ServerResponse: googleapi.ServerResponse{ @@ -163426,17 +171775,17 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -163601,17 +171950,17 @@ func (c *SnapshotsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body 
!= nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -163839,17 +172188,17 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SnapshotList{ ServerResponse: googleapi.ServerResponse{ @@ -164034,17 +172383,17 @@ func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -164190,17 +172539,17 @@ func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -164346,17 +172695,17 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -164601,17 +172950,17 @@ func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*S if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslCertificateAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -164809,17 +173158,17 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ 
ServerResponse: googleapi.ServerResponse{ @@ -164974,17 +173323,17 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslCertificate{ ServerResponse: googleapi.ServerResponse{ @@ -165140,17 +173489,17 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -165378,17 +173727,17 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslCertificateList{ ServerResponse: googleapi.ServerResponse{ @@ -165480,6 +173829,304 @@ func (c *SslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertific } } +// method id "compute.sslPolicies.aggregatedList": + +type SslPoliciesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all SslPolicy resources, +// regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. +func (r *SslPoliciesService) AggregatedList(project string) *SslPoliciesAggregatedListCall { + c := &SslPoliciesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. 
+// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *SslPoliciesAggregatedListCall) Filter(filter string) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *SslPoliciesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *SslPoliciesAggregatedListCall) MaxResults(maxResults int64) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. 
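For illustration, a sketch (not generated code) of how the optional parameters documented above compose on the new aggregated-list call. It reuses the svc client and imports from the earlier sketch; the filter string and project name are placeholders.

func listAggregatedSslPolicies(ctx context.Context, svc *compute.Service) (*compute.SslPoliciesAggregatedList, error) {
	return svc.SslPolicies.AggregatedList("my-project").
		Filter(`name != example-ssl-policy`). // filter syntax as described in the Filter comment above
		MaxResults(100).                      // page size; acceptable values are 0 to 500
		OrderBy("creationTimestamp desc").    // newest results first
		ReturnPartialSuccess(true).           // opt in to partial results on per-scope failures
		Context(ctx).
		Do()
}
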
+func (c *SslPoliciesAggregatedListCall) OrderBy(orderBy string) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *SslPoliciesAggregatedListCall) PageToken(pageToken string) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *SslPoliciesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslPoliciesAggregatedListCall) Fields(s ...googleapi.Field) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SslPoliciesAggregatedListCall) IfNoneMatch(entityTag string) *SslPoliciesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesAggregatedListCall) Context(ctx context.Context) *SslPoliciesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SslPoliciesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/sslPolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.aggregatedList" call. +// Exactly one of *SslPoliciesAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *SslPoliciesAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslPoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SslPoliciesAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all SslPolicy resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/sslPolicies", + // "httpMethod": "GET", + // "id": "compute.sslPolicies.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/sslPolicies", + // "response": { + // "$ref": "SslPoliciesAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *SslPoliciesAggregatedListCall) Pages(ctx context.Context, f func(*SslPoliciesAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.sslPolicies.delete": type SslPoliciesDeleteCall struct { @@ -165495,9 +174142,9 @@ type SslPoliciesDeleteCall struct { // be deleted only if it is not in use by any TargetHttpsProxy or // TargetSslProxy resources. // -// - project: Project ID for this request. -// - sslPolicy: Name of the SSL policy to delete. The name must be 1-63 -// characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - sslPolicy: Name of the SSL policy to delete. The name must be 1-63 +// characters long, and comply with RFC1035. func (r *SslPoliciesService) Delete(project string, sslPolicy string) *SslPoliciesDeleteCall { c := &SslPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -165584,17 +174231,17 @@ func (c *SslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -165663,9 +174310,9 @@ type SslPoliciesGetCall struct { // Get: Lists all of the ordered rules present in a single specified // policy. // -// - project: Project ID for this request. -// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 -// characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. 
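For illustration, a sketch (not generated code) driving the Pages helper defined above: Pages re-issues the request with each nextPageToken until the token is empty. The Items and SslPolicies field names are assumed to follow the other aggregated-list types (scope name mapped to a scoped list); svc and the log import come from the first sketch.

func logAggregatedSslPolicies(ctx context.Context, svc *compute.Service) error {
	return svc.SslPolicies.AggregatedList("my-project").Pages(ctx, func(page *compute.SslPoliciesAggregatedList) error {
		for scope, scoped := range page.Items { // field names assumed
			log.Printf("%s: %d SSL policies", scope, len(scoped.SslPolicies))
		}
		return nil // a non-nil error here halts the iteration
	})
}
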
func (r *SslPoliciesService) Get(project string, sslPolicy string) *SslPoliciesGetCall { c := &SslPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -165749,17 +174396,17 @@ func (c *SslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -165914,17 +174561,17 @@ func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -166152,17 +174799,17 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslPoliciesList{ ServerResponse: googleapi.ServerResponse{ @@ -166434,17 +175081,17 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslPoliciesListAvailableFeaturesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -166530,9 +175177,9 @@ type SslPoliciesPatchCall struct { // Patch: Patches the specified SSL policy with the data included in the // request. // -// - project: Project ID for this request. -// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 -// characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. 
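For illustration, a sketch (not generated code) of the Patch call declared just below. MinTlsVersion and Profile are assumed field names and enum values from the public SslPolicy schema; project and policy names are placeholders, and svc is the client from the first sketch.

func patchSslPolicy(svc *compute.Service) error {
	op, err := svc.SslPolicies.Patch("my-project", "my-ssl-policy", &compute.SslPolicy{
		MinTlsVersion: "TLS_1_2", // assumed field name and enum value
		Profile:       "MODERN",  // assumed field name and enum value
	}).Do()
	if err != nil {
		return err
	}
	log.Printf("patch operation started: %s", op.Name)
	return nil
}
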
func (r *SslPoliciesService) Patch(project string, sslPolicy string, sslpolicy *SslPolicy) *SslPoliciesPatchCall { c := &SslPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -166625,17 +175272,17 @@ func (c *SslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -166882,17 +175529,17 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SubnetworkAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -167094,17 +175741,17 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -167280,17 +175927,17 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -167460,17 +176107,17 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Subnetwork{ ServerResponse: googleapi.ServerResponse{ @@ -167640,17 +176287,17 @@ func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -167824,17 +176471,17 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, 
&googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -168074,17 +176721,17 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SubnetworkList{ ServerResponse: googleapi.ServerResponse{ @@ -168362,17 +177009,17 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UsableSubnetworksAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -168594,17 +177241,17 @@ func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -168773,17 +177420,17 @@ func (c *SubnetworksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -168958,17 +177605,17 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -169131,17 +177778,17 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: 
googleapi.ServerResponse{ @@ -169304,17 +177951,17 @@ func (c *TargetGrpcProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -169469,17 +178116,17 @@ func (c *TargetGrpcProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetGrpc if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetGrpcProxy{ ServerResponse: googleapi.ServerResponse{ @@ -169635,17 +178282,17 @@ func (c *TargetGrpcProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -169872,17 +178519,17 @@ func (c *TargetGrpcProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetGrp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetGrpcProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -170084,17 +178731,17 @@ func (c *TargetGrpcProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -170343,17 +178990,17 @@ func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpProxyAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -170551,17 +179198,17 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer 
googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -170716,17 +179363,17 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpProxy{ ServerResponse: googleapi.ServerResponse{ @@ -170882,17 +179529,17 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -171120,17 +179767,17 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -171332,17 +179979,17 @@ func (c *TargetHttpProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -171508,17 +180155,17 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -171767,17 +180414,17 @@ func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpsProxyAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -171975,17 +180622,17 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, 
&googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -172140,17 +180787,17 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpsProxy{ ServerResponse: googleapi.ServerResponse{ @@ -172306,17 +180953,17 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -172544,17 +181191,17 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpsProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -172756,17 +181403,17 @@ func (c *TargetHttpsProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -172838,10 +181485,10 @@ type TargetHttpsProxiesSetCertificateMapCall struct { // SetCertificateMap: Changes the Certificate Map for TargetHttpsProxy. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose -// CertificateMap is to be set. The name must be 1-63 characters long, -// and comply with RFC1035. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose +// CertificateMap is to be set. The name must be 1-63 characters long, +// and comply with RFC1035. 
func (r *TargetHttpsProxiesService) SetCertificateMap(project string, targetHttpsProxy string, targethttpsproxiessetcertificatemaprequest *TargetHttpsProxiesSetCertificateMapRequest) *TargetHttpsProxiesSetCertificateMapCall { c := &TargetHttpsProxiesSetCertificateMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -172934,17 +181581,17 @@ func (c *TargetHttpsProxiesSetCertificateMapCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -173015,9 +181662,9 @@ type TargetHttpsProxiesSetQuicOverrideCall struct { // SetQuicOverride: Sets the QUIC override policy for TargetHttpsProxy. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set the -// QUIC override policy for. The name should conform to RFC1035. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set the +// QUIC override policy for. The name should conform to RFC1035. func (r *TargetHttpsProxiesService) SetQuicOverride(project string, targetHttpsProxy string, targethttpsproxiessetquicoverriderequest *TargetHttpsProxiesSetQuicOverrideRequest) *TargetHttpsProxiesSetQuicOverrideCall { c := &TargetHttpsProxiesSetQuicOverrideCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -173110,17 +181757,17 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -173191,9 +181838,9 @@ type TargetHttpsProxiesSetSslCertificatesCall struct { // SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an -// SslCertificates resource for. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an +// SslCertificates resource for. 
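For illustration, a sketch (not generated code) of the SetSslCertificates call declared just below. The SslCertificates field name on the request type is assumed; the certificate URL and resource names are placeholders, and svc is the client from the first sketch.

func replaceProxyCertificates(svc *compute.Service) error {
	op, err := svc.TargetHttpsProxies.SetSslCertificates("my-project", "my-https-proxy",
		&compute.TargetHttpsProxiesSetSslCertificatesRequest{
			SslCertificates: []string{"projects/my-project/global/sslCertificates/my-cert"}, // assumed field name
		}).Do()
	if err != nil {
		return err
	}
	log.Printf("setSslCertificates operation started: %s", op.Name)
	return nil
}
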
func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHttpsProxy string, targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest) *TargetHttpsProxiesSetSslCertificatesCall { c := &TargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -173286,17 +181933,17 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -173372,10 +182019,10 @@ type TargetHttpsProxiesSetSslPolicyCall struct { // balancer. They do not affect the connection between the load balancer // and the backends. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose SSL -// policy is to be set. The name must be 1-63 characters long, and -// comply with RFC1035. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose SSL +// policy is to be set. The name must be 1-63 characters long, and +// comply with RFC1035. func (r *TargetHttpsProxiesService) SetSslPolicy(project string, targetHttpsProxy string, sslpolicyreference *SslPolicyReference) *TargetHttpsProxiesSetSslPolicyCall { c := &TargetHttpsProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -173468,17 +182115,17 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -173549,9 +182196,9 @@ type TargetHttpsProxiesSetUrlMapCall struct { // SetUrlMap: Changes the URL map for TargetHttpsProxy. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose URL -// map is to be set. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose URL +// map is to be set. 
func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy string, urlmapreference *UrlMapReference) *TargetHttpsProxiesSetUrlMapCall { c := &TargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -173644,17 +182291,17 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -173902,17 +182549,17 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetInstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -174114,17 +182761,17 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -174291,17 +182938,17 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetInstance{ ServerResponse: googleapi.ServerResponse{ @@ -174469,17 +183116,17 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -174719,17 +183366,17 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetInstanceList{ ServerResponse: googleapi.ServerResponse{ @@ -174941,17 +183588,17 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) 
(*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -175129,17 +183776,17 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -175395,17 +184042,17 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetPoolAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -175607,17 +184254,17 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -175784,17 +184431,17 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetPool{ ServerResponse: googleapi.ServerResponse{ @@ -175869,10 +184516,10 @@ type TargetPoolsGetHealthCall struct { // GetHealth: Gets the most recent health check results for each IP for // the instance that is referenced by the given target pool. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetPool: Name of the TargetPool resource to which the queried -// instance belongs. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to which the queried +// instance belongs. 
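For illustration, a sketch (not generated code) of the GetHealth call declared just below. The Instance field on InstanceReference and the HealthStatus field on the response are assumed names; all project, region, pool, and instance values are placeholders, and svc is the client from the first sketch.

func logPoolInstanceHealth(svc *compute.Service) error {
	health, err := svc.TargetPools.GetHealth("my-project", "us-central1", "my-pool",
		&compute.InstanceReference{
			Instance: "projects/my-project/zones/us-central1-a/instances/my-instance", // assumed field name
		}).Do()
	if err != nil {
		return err
	}
	log.Printf("health statuses returned: %d", len(health.HealthStatus)) // assumed field name
	return nil
}
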
func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall { c := &TargetPoolsGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -175951,17 +184598,17 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetPoolInstanceHealth{ ServerResponse: googleapi.ServerResponse{ @@ -176132,17 +184779,17 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -176382,17 +185029,17 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetPoolList{ ServerResponse: googleapi.ServerResponse{ @@ -176604,17 +185251,17 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -176695,10 +185342,10 @@ type TargetPoolsRemoveInstanceCall struct { // RemoveInstance: Removes instance URL from a target pool. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetPool: Name of the TargetPool resource to remove instances -// from. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to remove instances +// from. 
func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -176793,17 +185440,17 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -176884,10 +185531,10 @@ type TargetPoolsSetBackupCall struct { // SetBackup: Changes a backup target pool's configurations. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetPool: Name of the TargetPool resource to set a backup pool -// for. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to set a backup pool +// for. func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -176989,17 +185636,17 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -177172,17 +185819,17 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -177337,17 +185984,17 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetSslProxy{ ServerResponse: googleapi.ServerResponse{ @@ -177503,17 +186150,17 @@ func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - 
return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -177741,17 +186388,17 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetSslProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -177857,9 +186504,9 @@ type TargetSslProxiesSetBackendServiceCall struct { // SetBackendService: Changes the BackendService for TargetSslProxy. // -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose -// BackendService resource is to be set. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// BackendService resource is to be set. func (r *TargetSslProxiesService) SetBackendService(project string, targetSslProxy string, targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest) *TargetSslProxiesSetBackendServiceCall { c := &TargetSslProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -177952,17 +186599,17 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -178034,10 +186681,10 @@ type TargetSslProxiesSetCertificateMapCall struct { // SetCertificateMap: Changes the Certificate Map for TargetSslProxy. // -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose -// CertificateMap is to be set. The name must be 1-63 characters long, -// and comply with RFC1035. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// CertificateMap is to be set. The name must be 1-63 characters long, +// and comply with RFC1035. func (r *TargetSslProxiesService) SetCertificateMap(project string, targetSslProxy string, targetsslproxiessetcertificatemaprequest *TargetSslProxiesSetCertificateMapRequest) *TargetSslProxiesSetCertificateMapCall { c := &TargetSslProxiesSetCertificateMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -178130,17 +186777,17 @@ func (c *TargetSslProxiesSetCertificateMapCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -178211,9 +186858,9 @@ type TargetSslProxiesSetProxyHeaderCall struct { // SetProxyHeader: Changes the ProxyHeaderType for TargetSslProxy. 
// -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose -// ProxyHeader is to be set. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// ProxyHeader is to be set. func (r *TargetSslProxiesService) SetProxyHeader(project string, targetSslProxy string, targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest) *TargetSslProxiesSetProxyHeaderCall { c := &TargetSslProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -178306,17 +186953,17 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -178388,9 +187035,9 @@ type TargetSslProxiesSetSslCertificatesCall struct { // SetSslCertificates: Changes SslCertificates for TargetSslProxy. // -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose -// SslCertificate resource is to be set. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// SslCertificate resource is to be set. func (r *TargetSslProxiesService) SetSslCertificates(project string, targetSslProxy string, targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest) *TargetSslProxiesSetSslCertificatesCall { c := &TargetSslProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -178483,17 +187130,17 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -178568,10 +187215,10 @@ type TargetSslProxiesSetSslPolicyCall struct { // connections between clients and the SSL proxy load balancer. They do // not affect the connection between the load balancer and the backends. // -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose SSL -// policy is to be set. The name must be 1-63 characters long, and -// comply with RFC1035. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose SSL +// policy is to be set. The name must be 1-63 characters long, and +// comply with RFC1035. 
func (r *TargetSslProxiesService) SetSslPolicy(project string, targetSslProxy string, sslpolicyreference *SslPolicyReference) *TargetSslProxiesSetSslPolicyCall { c := &TargetSslProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -178664,17 +187311,17 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -178731,6 +187378,304 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op } +// method id "compute.targetTcpProxies.aggregatedList": + +type TargetTcpProxiesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all TargetTcpProxy resources, +// regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. +func (r *TargetTcpProxiesService) AggregatedList(project string) *TargetTcpProxiesAggregatedListCall { + c := &TargetTcpProxiesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *TargetTcpProxiesAggregatedListCall) Filter(filter string) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *TargetTcpProxiesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *TargetTcpProxiesAggregatedListCall) MaxResults(maxResults int64) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *TargetTcpProxiesAggregatedListCall) OrderBy(orderBy string) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *TargetTcpProxiesAggregatedListCall) PageToken(pageToken string) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. 
+func (c *TargetTcpProxiesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetTcpProxiesAggregatedListCall) Fields(s ...googleapi.Field) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetTcpProxiesAggregatedListCall) IfNoneMatch(entityTag string) *TargetTcpProxiesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetTcpProxiesAggregatedListCall) Context(ctx context.Context) *TargetTcpProxiesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetTcpProxiesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetTcpProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/targetTcpProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetTcpProxies.aggregatedList" call. +// Exactly one of *TargetTcpProxyAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *TargetTcpProxyAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetTcpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxyAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TargetTcpProxyAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all TargetTcpProxy resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/targetTcpProxies", + // "httpMethod": "GET", + // "id": "compute.targetTcpProxies.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/targetTcpProxies", + // "response": { + // "$ref": "TargetTcpProxyAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *TargetTcpProxiesAggregatedListCall) Pages(ctx context.Context, f func(*TargetTcpProxyAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.targetTcpProxies.delete": type TargetTcpProxiesDeleteCall struct { @@ -178832,17 +187777,17 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -178997,17 +187942,17 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetTcpProxy{ ServerResponse: googleapi.ServerResponse{ @@ -179163,17 +188108,17 @@ func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -179401,17 +188346,17 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetTcpProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -179517,9 +188462,9 @@ type TargetTcpProxiesSetBackendServiceCall struct { // SetBackendService: Changes the BackendService for TargetTcpProxy. // -// - project: Project ID for this request. -// - targetTcpProxy: Name of the TargetTcpProxy resource whose -// BackendService resource is to be set. +// - project: Project ID for this request. +// - targetTcpProxy: Name of the TargetTcpProxy resource whose +// BackendService resource is to be set. 
func (r *TargetTcpProxiesService) SetBackendService(project string, targetTcpProxy string, targettcpproxiessetbackendservicerequest *TargetTcpProxiesSetBackendServiceRequest) *TargetTcpProxiesSetBackendServiceCall { c := &TargetTcpProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -179612,17 +188557,17 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -179694,9 +188639,9 @@ type TargetTcpProxiesSetProxyHeaderCall struct { // SetProxyHeader: Changes the ProxyHeaderType for TargetTcpProxy. // -// - project: Project ID for this request. -// - targetTcpProxy: Name of the TargetTcpProxy resource whose -// ProxyHeader is to be set. +// - project: Project ID for this request. +// - targetTcpProxy: Name of the TargetTcpProxy resource whose +// ProxyHeader is to be set. func (r *TargetTcpProxiesService) SetProxyHeader(project string, targetTcpProxy string, targettcpproxiessetproxyheaderrequest *TargetTcpProxiesSetProxyHeaderRequest) *TargetTcpProxiesSetProxyHeaderCall { c := &TargetTcpProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -179789,17 +188734,17 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -180047,17 +188992,17 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetVpnGatewayAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -180259,17 +189204,17 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -180436,17 +189381,17 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := 
googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetVpnGateway{ ServerResponse: googleapi.ServerResponse{ @@ -180614,17 +189559,17 @@ func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -180864,17 +189809,17 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetVpnGatewayList{ ServerResponse: googleapi.ServerResponse{ @@ -180974,6 +189919,195 @@ func (c *TargetVpnGatewaysListCall) Pages(ctx context.Context, f func(*TargetVpn } } +// method id "compute.targetVpnGateways.setLabels": + +type TargetVpnGatewaysSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a TargetVpnGateway. To learn more about +// labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. +func (r *TargetVpnGatewaysService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *TargetVpnGatewaysSetLabelsCall { + c := &TargetVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *TargetVpnGatewaysSetLabelsCall) RequestId(requestId string) *TargetVpnGatewaysSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *TargetVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetVpnGatewaysSetLabelsCall) Context(ctx context.Context) *TargetVpnGatewaysSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetVpnGatewaysSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetVpnGateways.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a TargetVpnGateway. 
To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.targetVpnGateways.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.urlMaps.aggregatedList": type UrlMapsAggregatedListCall struct { @@ -181165,17 +190299,17 @@ func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAg if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapsAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -181373,17 +190507,17 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -181538,17 +190672,17 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, 
gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMap{ ServerResponse: googleapi.ServerResponse{ @@ -181704,17 +190838,17 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -181875,17 +191009,17 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -182121,17 +191255,17 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapList{ ServerResponse: googleapi.ServerResponse{ @@ -182333,17 +191467,17 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -182510,17 +191644,17 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -182672,17 +191806,17 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapsValidateResponse{ ServerResponse: googleapi.ServerResponse{ @@ -182925,17 +192059,17 @@ func (c 
*VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGa if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnGatewayAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -183137,17 +192271,17 @@ func (c *VpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -183314,17 +192448,17 @@ func (c *VpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*VpnGateway, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnGateway{ ServerResponse: googleapi.ServerResponse{ @@ -183486,17 +192620,17 @@ func (c *VpnGatewaysGetStatusCall) Do(opts ...googleapi.CallOption) (*VpnGateway if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnGatewaysGetStatusResponse{ ServerResponse: googleapi.ServerResponse{ @@ -183664,17 +192798,17 @@ func (c *VpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -183914,17 +193048,17 @@ func (c *VpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayList, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnGatewayList{ ServerResponse: googleapi.ServerResponse{ @@ -184137,17 +193271,17 @@ func (c *VpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); 
err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -184310,17 +193444,17 @@ func (c *VpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -184572,17 +193706,17 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnTunnelAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -184784,17 +193918,17 @@ func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -184961,17 +194095,17 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnTunnel{ ServerResponse: googleapi.ServerResponse{ @@ -185139,17 +194273,17 @@ func (c *VpnTunnelsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -185389,17 +194523,17 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnTunnelList{ ServerResponse: googleapi.ServerResponse{ @@ -185499,6 +194633,195 @@ func (c *VpnTunnelsListCall) Pages(ctx context.Context, f func(*VpnTunnelList) e } } +// method id "compute.vpnTunnels.setLabels": + +type VpnTunnelsSetLabelsCall struct { + s *Service + project string + 
region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a VpnTunnel. To learn more about +// labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. +func (r *VpnTunnelsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *VpnTunnelsSetLabelsCall { + c := &VpnTunnelsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *VpnTunnelsSetLabelsCall) RequestId(requestId string) *VpnTunnelsSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnTunnelsSetLabelsCall) Fields(s ...googleapi.Field) *VpnTunnelsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnTunnelsSetLabelsCall) Context(ctx context.Context) *VpnTunnelsSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnTunnelsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnTunnelsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnTunnels.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnTunnelsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a VpnTunnel. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.vpnTunnels.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.zoneOperations.delete": type ZoneOperationsDeleteCall struct { @@ -185583,7 +194906,7 @@ func (c *ZoneOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -185731,17 +195054,17 @@ func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -185982,17 +195305,17 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ @@ -186191,17 +195514,17 @@ func (c *ZoneOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -186360,17 +195683,17 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Zone{ ServerResponse: googleapi.ServerResponse{ @@ -186599,17 +195922,17 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if 
err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ZoneList{ ServerResponse: googleapi.ServerResponse{ diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 2d3e00edc9f23..b328a7976ab54 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -79,6 +79,9 @@ type Error struct { Header http.Header Errors []ErrorItem + // err is typically a wrapped apierror.APIError, see + // google-api-go-client/internal/gensupport/error.go. + err error } // ErrorItem is a detailed error code & message from the Google API frontend. @@ -122,6 +125,15 @@ func (e *Error) Error() string { return buf.String() } +// Wrap allows an existing Error to wrap another error. See also [Error.Unwrap]. +func (e *Error) Wrap(err error) { + e.err = err +} + +func (e *Error) Unwrap() error { + return e.err +} + type errorReply struct { Error *Error `json:"error"` } @@ -174,8 +186,9 @@ func CheckMediaResponse(res *http.Response) error { } slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) return &Error{ - Code: res.StatusCode, - Body: string(slurp), + Code: res.StatusCode, + Body: string(slurp), + Header: res.Header, } } @@ -382,11 +395,11 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { // For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, // you could request just those fields like this: // -// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// svc.Events.List().Fields("nextPageToken", "items/id").Do() // // or if you were also interested in each Item's "Updated" field, you can combine them like this: // -// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() // // Another way to find field names is through the Google API explorer: // https://developers.google.com/apis-explorer/#p/ diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go index 61720ec2ea13d..f5d826c2a19d5 100644 --- a/vendor/google.golang.org/api/googleapi/transport/apikey.go +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -7,7 +7,7 @@ // // This package is DEPRECATED. Users should instead use, // -// service, err := NewService(..., option.WithAPIKey(...)) +// service, err := NewService(..., option.WithAPIKey(...)) package transport import ( diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json new file mode 100644 index 0000000000000..0c82b86a10082 --- /dev/null +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json @@ -0,0 +1,372 @@ +{ + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account." + } + } + } + }, + "basePath": "", + "baseUrl": "https://iamcredentials.googleapis.com/", + "batchPath": "batch", + "canonicalName": "IAM Credentials", + "description": "Creates short-lived credentials for impersonating IAM service accounts. To enable this API, you must enable the IAM API (iam.googleapis.com). 
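The bulk of the compute-gen.go hunks above swap bare `return nil, err` / `return nil, &googleapi.Error{...}` returns for `gensupport.WrapError`, and the googleapi.go hunk adds the `Wrap`/`Unwrap` plumbing that makes the wrapping visible to `errors.As`. Below is a minimal sketch of what a caller sees, assuming the compute v1 client surface and placeholder project/region/tunnel/label values; the `apierror` import is an assumption based on the comment added to googleapi.go, not something this diff spells out.

```go
// Sketch only: calls the newly generated compute.vpnTunnels.setLabels method
// and inspects the error using the wrapping added in this change. Project,
// region, tunnel name, and labels are placeholders, not values from the diff.
package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"net/http"

	"github.com/googleapis/gax-go/v2/apierror"
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	req := &compute.RegionSetLabelsRequest{
		Labels: map[string]string{"env": "dev"},
		// LabelFingerprint must match the resource's current fingerprint.
	}
	op, err := svc.VpnTunnels.SetLabels("my-project", "us-central1", "my-tunnel", req).Context(ctx).Do()
	if err != nil {
		// The concrete *googleapi.Error is unchanged, so errors.As still works.
		var gerr *googleapi.Error
		if errors.As(err, &gerr) && gerr.Code == http.StatusNotFound {
			log.Fatalf("tunnel not found: %s", gerr.Message)
		}
		// Per the comment added to googleapi.go, the error now typically wraps
		// an *apierror.APIError carrying richer, structured detail.
		var aerr *apierror.APIError
		if errors.As(err, &aerr) {
			log.Fatalf("api error: %v", aerr)
		}
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, op.Status)
}
```

The concrete error type returned by `Do` is unchanged, so existing checks against `*googleapi.Error` keep working; the wrapping only adds detail reachable through `errors.As`/`Unwrap`.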
", + "discoveryVersion": "v1", + "documentationLink": "https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials", + "fullyEncodeReservedExpansion": true, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "id": "iamcredentials:v1", + "kind": "discovery#restDescription", + "mtlsRootUrl": "https://iamcredentials.mtls.googleapis.com/", + "name": "iamcredentials", + "ownerDomain": "google.com", + "ownerName": "Google", + "parameters": { + "$.xgafv": { + "description": "V1 error format.", + "enum": [ + "1", + "2" + ], + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "location": "query", + "type": "string" + }, + "alt": { + "default": "json", + "description": "Data format for response.", + "enum": [ + "json", + "media", + "proto" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "type": "string" + }, + "callback": { + "description": "JSONP", + "location": "query", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", + "type": "string" + }, + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query", + "type": "string" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "location": "query", + "type": "string" + } + }, + "protocol": "rest", + "resources": { + "projects": { + "resources": { + "serviceAccounts": { + "methods": { + "generateAccessToken": { + "description": "Generates an OAuth 2.0 access token for a service account.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateAccessToken", + "httpMethod": "POST", + "id": "iamcredentials.projects.serviceAccounts.generateAccessToken", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:generateAccessToken", + "request": { + "$ref": "GenerateAccessTokenRequest" + }, + "response": { + "$ref": "GenerateAccessTokenResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "generateIdToken": { + "description": "Generates an OpenID Connect ID token for a service account.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateIdToken", + "httpMethod": "POST", + "id": "iamcredentials.projects.serviceAccounts.generateIdToken", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:generateIdToken", + "request": { + "$ref": "GenerateIdTokenRequest" + }, + "response": { + "$ref": "GenerateIdTokenResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "signBlob": { + "description": "Signs a blob using a service account's system-managed private key.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", + "httpMethod": "POST", + "id": "iamcredentials.projects.serviceAccounts.signBlob", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:signBlob", + "request": { + "$ref": "SignBlobRequest" + }, + "response": { + "$ref": "SignBlobResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "signJwt": { + "description": "Signs a JWT using a service account's system-managed private key.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", + "httpMethod": "POST", + "id": "iamcredentials.projects.serviceAccounts.signJwt", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:signJwt", + "request": { + "$ref": "SignJwtRequest" + }, + "response": { + "$ref": "SignJwtResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + }, + "revision": "20211203", + "rootUrl": "https://iamcredentials.googleapis.com/", + "schemas": { + "GenerateAccessTokenRequest": { + "id": "GenerateAccessTokenRequest", + "properties": { + "delegates": { + "description": "The sequence of service accounts in a delegation chain. This field is required for [delegated requests](https://cloud.google.com/iam/help/credentials/delegated-request). For [direct requests](https://cloud.google.com/iam/help/credentials/direct-request), which are more common, do not specify this field. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "items": { + "type": "string" + }, + "type": "array" + }, + "lifetime": { + "description": "The desired lifetime duration of the access token in seconds. By default, the maximum allowed value is 1 hour. To set a lifetime of up to 12 hours, you can add the service account as an allowed value in an Organization Policy that enforces the `constraints/iam.allowServiceAccountCredentialLifetimeExtension` constraint. See detailed instructions at https://cloud.google.com/iam/help/credentials/lifetime If a value is not specified, the token's lifetime will be set to a default value of 1 hour.", + "format": "google-duration", + "type": "string" + }, + "scope": { + "description": "Required. Code to identify the scopes to be included in the OAuth 2.0 access token. See https://developers.google.com/identity/protocols/googlescopes for more information. At least one value required.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GenerateAccessTokenResponse": { + "id": "GenerateAccessTokenResponse", + "properties": { + "accessToken": { + "description": "The OAuth 2.0 access token.", + "type": "string" + }, + "expireTime": { + "description": "Token expiration time. The expiration time is always set.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "GenerateIdTokenRequest": { + "id": "GenerateIdTokenRequest", + "properties": { + "audience": { + "description": "Required. The audience for the token, such as the API or account that this token grants access to.", + "type": "string" + }, + "delegates": { + "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", + "items": { + "type": "string" + }, + "type": "array" + }, + "includeEmail": { + "description": "Include the service account email in the token. If set to `true`, the token will contain `email` and `email_verified` claims.", + "type": "boolean" + } + }, + "type": "object" + }, + "GenerateIdTokenResponse": { + "id": "GenerateIdTokenResponse", + "properties": { + "token": { + "description": "The OpenId Connect ID token.", + "type": "string" + } + }, + "type": "object" + }, + "SignBlobRequest": { + "id": "SignBlobRequest", + "properties": { + "delegates": { + "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "items": { + "type": "string" + }, + "type": "array" + }, + "payload": { + "description": "Required. The bytes to sign.", + "format": "byte", + "type": "string" + } + }, + "type": "object" + }, + "SignBlobResponse": { + "id": "SignBlobResponse", + "properties": { + "keyId": { + "description": "The ID of the key used to sign the blob. The key used for signing will remain valid for at least 12 hours after the blob is signed. To verify the signature, you can retrieve the public key in several formats from the following endpoints: - RSA public key wrapped in an X.509 v3 certificate: `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL}` - Raw key in JSON format: `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` - JSON Web Key (JWK): `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}`", + "type": "string" + }, + "signedBlob": { + "description": "The signature for the blob. Does not include the original blob. After the key pair referenced by the `key_id` response field expires, Google no longer exposes the public key that can be used to verify the blob. As a result, the receiver can no longer verify the signature.", + "format": "byte", + "type": "string" + } + }, + "type": "object" + }, + "SignJwtRequest": { + "id": "SignJwtRequest", + "properties": { + "delegates": { + "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "items": { + "type": "string" + }, + "type": "array" + }, + "payload": { + "description": "Required. The JWT payload to sign. Must be a serialized JSON object that contains a JWT Claims Set. 
For example: `{\"sub\": \"user@example.com\", \"iat\": 313435}` If the JWT Claims Set contains an expiration time (`exp`) claim, it must be an integer timestamp that is not in the past and no more than 12 hours in the future.", + "type": "string" + } + }, + "type": "object" + }, + "SignJwtResponse": { + "id": "SignJwtResponse", + "properties": { + "keyId": { + "description": "The ID of the key used to sign the JWT. The key used for signing will remain valid for at least 12 hours after the JWT is signed. To verify the signature, you can retrieve the public key in several formats from the following endpoints: - RSA public key wrapped in an X.509 v3 certificate: `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL}` - Raw key in JSON format: `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` - JSON Web Key (JWK): `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}`", + "type": "string" + }, + "signedJwt": { + "description": "The signed JWT. Contains the automatically generated header; the client-supplied payload; and the signature, which is generated using the key referenced by the `kid` field in the header. After the key pair referenced by the `key_id` response field expires, Google no longer exposes the public key that can be used to verify the JWT. As a result, the receiver can no longer verify the signature.", + "type": "string" + } + }, + "type": "object" + } + }, + "servicePath": "", + "title": "IAM Service Account Credentials API", + "version": "v1", + "version_module": true +} \ No newline at end of file diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go new file mode 100644 index 0000000000000..63e5ef64c0dcc --- /dev/null +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -0,0 +1,1094 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated file. DO NOT EDIT. + +// Package iamcredentials provides access to the IAM Service Account Credentials API. +// +// For product documentation, see: https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials +// +// # Creating a client +// +// Usage example: +// +// import "google.golang.org/api/iamcredentials/v1" +// ... +// ctx := context.Background() +// iamcredentialsService, err := iamcredentials.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// # Other authentication options +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. 
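As a companion to the usage example in the package comment above, here is a hedged sketch of minting a short-lived access token through the generated surface (`NewService`, `Projects.ServiceAccounts.GenerateAccessToken`). The target service-account email is a placeholder, and Application Default Credentials plus the `roles/iam.serviceAccountTokenCreator` grant are assumed.

```go
// Sketch only: mints a one-hour access token for a target service account.
// Assumes Application Default Credentials and that the caller holds
// roles/iam.serviceAccountTokenCreator on the (placeholder) target account.
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/api/iamcredentials/v1"
)

func main() {
	ctx := context.Background()
	svc, err := iamcredentials.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// The `-` wildcard is required; replacing it with a project ID is invalid.
	name := "projects/-/serviceAccounts/target-sa@my-project.iam.gserviceaccount.com"
	resp, err := svc.Projects.ServiceAccounts.GenerateAccessToken(name, &iamcredentials.GenerateAccessTokenRequest{
		Scope:    []string{"https://www.googleapis.com/auth/cloud-platform"},
		Lifetime: "3600s", // google-duration format; defaults to 1 hour when omitted
	}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access token expires at", resp.ExpireTime)
	_ = resp.AccessToken // hand off to another client, e.g. via an oauth2 token source
}
```

The `Lifetime` string uses the `google-duration` format described in the discovery document; lifetimes above one hour additionally require the `constraints/iam.allowServiceAccountCredentialLifetimeExtension` organization policy noted there.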
+package iamcredentials // import "google.golang.org/api/iamcredentials/v1" + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + googleapi "google.golang.org/api/googleapi" + internal "google.golang.org/api/internal" + gensupport "google.golang.org/api/internal/gensupport" + option "google.golang.org/api/option" + internaloption "google.golang.org/api/option/internaloption" + htransport "google.golang.org/api/transport/http" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = internaloption.WithDefaultEndpoint + +const apiId = "iamcredentials:v1" +const apiName = "iamcredentials" +const apiVersion = "v1" +const basePath = "https://iamcredentials.googleapis.com/" +const mtlsBasePath = "https://iamcredentials.mtls.googleapis.com/" + +// OAuth2 scopes used by this API. +const ( + // See, edit, configure, and delete your Google Cloud data and see the + // email address for your Google Account. + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" +) + +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := internaloption.WithDefaultScopes( + "https://www.googleapis.com/auth/cloud-platform", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
+func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.ServiceAccounts = NewProjectsServiceAccountsService(s) + return rs +} + +type ProjectsService struct { + s *Service + + ServiceAccounts *ProjectsServiceAccountsService +} + +func NewProjectsServiceAccountsService(s *Service) *ProjectsServiceAccountsService { + rs := &ProjectsServiceAccountsService{s: s} + return rs +} + +type ProjectsServiceAccountsService struct { + s *Service +} + +type GenerateAccessTokenRequest struct { + // Delegates: The sequence of service accounts in a delegation chain. + // This field is required for delegated requests + // (https://cloud.google.com/iam/help/credentials/delegated-request). + // For direct requests + // (https://cloud.google.com/iam/help/credentials/direct-request), which + // are more common, do not specify this field. Each service account must + // be granted the `roles/iam.serviceAccountTokenCreator` role on its + // next service account in the chain. The last service account in the + // chain must be granted the `roles/iam.serviceAccountTokenCreator` role + // on the service account that is specified in the `name` field of the + // request. The delegates must have the following format: + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` + // wildcard character is required; replacing it with a project ID is + // invalid. + Delegates []string `json:"delegates,omitempty"` + + // Lifetime: The desired lifetime duration of the access token in + // seconds. By default, the maximum allowed value is 1 hour. To set a + // lifetime of up to 12 hours, you can add the service account as an + // allowed value in an Organization Policy that enforces the + // `constraints/iam.allowServiceAccountCredentialLifetimeExtension` + // constraint. See detailed instructions at + // https://cloud.google.com/iam/help/credentials/lifetime If a value is + // not specified, the token's lifetime will be set to a default value of + // 1 hour. + Lifetime string `json:"lifetime,omitempty"` + + // Scope: Required. Code to identify the scopes to be included in the + // OAuth 2.0 access token. See + // https://developers.google.com/identity/protocols/googlescopes for + // more information. At least one value required. + Scope []string `json:"scope,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Delegates") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Delegates") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GenerateAccessTokenRequest) MarshalJSON() ([]byte, error) { + type NoMethod GenerateAccessTokenRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GenerateAccessTokenResponse struct { + // AccessToken: The OAuth 2.0 access token. + AccessToken string `json:"accessToken,omitempty"` + + // ExpireTime: Token expiration time. The expiration time is always set. + ExpireTime string `json:"expireTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AccessToken") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccessToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GenerateAccessTokenResponse) MarshalJSON() ([]byte, error) { + type NoMethod GenerateAccessTokenResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GenerateIdTokenRequest struct { + // Audience: Required. The audience for the token, such as the API or + // account that this token grants access to. + Audience string `json:"audience,omitempty"` + + // Delegates: The sequence of service accounts in a delegation chain. + // Each service account must be granted the + // `roles/iam.serviceAccountTokenCreator` role on its next service + // account in the chain. The last service account in the chain must be + // granted the `roles/iam.serviceAccountTokenCreator` role on the + // service account that is specified in the `name` field of the request. + // The delegates must have the following format: + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` + // wildcard character is required; replacing it with a project ID is + // invalid. + Delegates []string `json:"delegates,omitempty"` + + // IncludeEmail: Include the service account email in the token. If set + // to `true`, the token will contain `email` and `email_verified` + // claims. + IncludeEmail bool `json:"includeEmail,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Audience") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Audience") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GenerateIdTokenRequest) MarshalJSON() ([]byte, error) { + type NoMethod GenerateIdTokenRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GenerateIdTokenResponse struct { + // Token: The OpenId Connect ID token. + Token string `json:"token,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Token") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Token") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { + type NoMethod GenerateIdTokenResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SignBlobRequest struct { + // Delegates: The sequence of service accounts in a delegation chain. + // Each service account must be granted the + // `roles/iam.serviceAccountTokenCreator` role on its next service + // account in the chain. The last service account in the chain must be + // granted the `roles/iam.serviceAccountTokenCreator` role on the + // service account that is specified in the `name` field of the request. + // The delegates must have the following format: + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` + // wildcard character is required; replacing it with a project ID is + // invalid. + Delegates []string `json:"delegates,omitempty"` + + // Payload: Required. The bytes to sign. + Payload string `json:"payload,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Delegates") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Delegates") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SignBlobRequest) MarshalJSON() ([]byte, error) { + type NoMethod SignBlobRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SignBlobResponse struct { + // KeyId: The ID of the key used to sign the blob. The key used for + // signing will remain valid for at least 12 hours after the blob is + // signed. To verify the signature, you can retrieve the public key in + // several formats from the following endpoints: - RSA public key + // wrapped in an X.509 v3 certificate: + // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT + // _EMAIL}` - Raw key in JSON format: + // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_ + // EMAIL}` - JSON Web Key (JWK): + // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_ + // EMAIL}` + KeyId string `json:"keyId,omitempty"` + + // SignedBlob: The signature for the blob. Does not include the original + // blob. After the key pair referenced by the `key_id` response field + // expires, Google no longer exposes the public key that can be used to + // verify the blob. As a result, the receiver can no longer verify the + // signature. + SignedBlob string `json:"signedBlob,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "KeyId") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "KeyId") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SignBlobResponse) MarshalJSON() ([]byte, error) { + type NoMethod SignBlobResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SignJwtRequest struct { + // Delegates: The sequence of service accounts in a delegation chain. + // Each service account must be granted the + // `roles/iam.serviceAccountTokenCreator` role on its next service + // account in the chain. The last service account in the chain must be + // granted the `roles/iam.serviceAccountTokenCreator` role on the + // service account that is specified in the `name` field of the request. + // The delegates must have the following format: + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` + // wildcard character is required; replacing it with a project ID is + // invalid. + Delegates []string `json:"delegates,omitempty"` + + // Payload: Required. The JWT payload to sign. 
Must be a serialized JSON + // object that contains a JWT Claims Set. For example: `{"sub": + // "user@example.com", "iat": 313435}` If the JWT Claims Set contains an + // expiration time (`exp`) claim, it must be an integer timestamp that + // is not in the past and no more than 12 hours in the future. + Payload string `json:"payload,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Delegates") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Delegates") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SignJwtRequest) MarshalJSON() ([]byte, error) { + type NoMethod SignJwtRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SignJwtResponse struct { + // KeyId: The ID of the key used to sign the JWT. The key used for + // signing will remain valid for at least 12 hours after the JWT is + // signed. To verify the signature, you can retrieve the public key in + // several formats from the following endpoints: - RSA public key + // wrapped in an X.509 v3 certificate: + // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT + // _EMAIL}` - Raw key in JSON format: + // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_ + // EMAIL}` - JSON Web Key (JWK): + // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_ + // EMAIL}` + KeyId string `json:"keyId,omitempty"` + + // SignedJwt: The signed JWT. Contains the automatically generated + // header; the client-supplied payload; and the signature, which is + // generated using the key referenced by the `kid` field in the header. + // After the key pair referenced by the `key_id` response field expires, + // Google no longer exposes the public key that can be used to verify + // the JWT. As a result, the receiver can no longer verify the + // signature. + SignedJwt string `json:"signedJwt,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "KeyId") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "KeyId") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SignJwtResponse) MarshalJSON() ([]byte, error) { + type NoMethod SignJwtResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "iamcredentials.projects.serviceAccounts.generateAccessToken": + +type ProjectsServiceAccountsGenerateAccessTokenCall struct { + s *Service + name string + generateaccesstokenrequest *GenerateAccessTokenRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GenerateAccessToken: Generates an OAuth 2.0 access token for a +// service account. +// +// - name: The resource name of the service account for which the +// credentials are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` +// wildcard character is required; replacing it with a project ID is +// invalid. +func (r *ProjectsServiceAccountsService) GenerateAccessToken(name string, generateaccesstokenrequest *GenerateAccessTokenRequest) *ProjectsServiceAccountsGenerateAccessTokenCall { + c := &ProjectsServiceAccountsGenerateAccessTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.generateaccesstokenrequest = generateaccesstokenrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGenerateAccessTokenCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Context(ctx context.Context) *ProjectsServiceAccountsGenerateAccessTokenCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateaccesstokenrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:generateAccessToken") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iamcredentials.projects.serviceAccounts.generateAccessToken" call. +// Exactly one of *GenerateAccessTokenResponse or error will be non-nil. 
+// Any non-2xx status code is an error. Response headers are in either +// *GenerateAccessTokenResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Do(opts ...googleapi.CallOption) (*GenerateAccessTokenResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GenerateAccessTokenResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Generates an OAuth 2.0 access token for a service account.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateAccessToken", + // "httpMethod": "POST", + // "id": "iamcredentials.projects.serviceAccounts.generateAccessToken", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:generateAccessToken", + // "request": { + // "$ref": "GenerateAccessTokenRequest" + // }, + // "response": { + // "$ref": "GenerateAccessTokenResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iamcredentials.projects.serviceAccounts.generateIdToken": + +type ProjectsServiceAccountsGenerateIdTokenCall struct { + s *Service + name string + generateidtokenrequest *GenerateIdTokenRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GenerateIdToken: Generates an OpenID Connect ID token for a service +// account. +// +// - name: The resource name of the service account for which the +// credentials are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` +// wildcard character is required; replacing it with a project ID is +// invalid. +func (r *ProjectsServiceAccountsService) GenerateIdToken(name string, generateidtokenrequest *GenerateIdTokenRequest) *ProjectsServiceAccountsGenerateIdTokenCall { + c := &ProjectsServiceAccountsGenerateIdTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.generateidtokenrequest = generateidtokenrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsServiceAccountsGenerateIdTokenCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGenerateIdTokenCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountsGenerateIdTokenCall) Context(ctx context.Context) *ProjectsServiceAccountsGenerateIdTokenCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountsGenerateIdTokenCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateidtokenrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:generateIdToken") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iamcredentials.projects.serviceAccounts.generateIdToken" call. +// Exactly one of *GenerateIdTokenResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GenerateIdTokenResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsGenerateIdTokenCall) Do(opts ...googleapi.CallOption) (*GenerateIdTokenResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GenerateIdTokenResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Generates an OpenID Connect ID token for a service account.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateIdToken", + // "httpMethod": "POST", + // "id": "iamcredentials.projects.serviceAccounts.generateIdToken", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. 
The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:generateIdToken", + // "request": { + // "$ref": "GenerateIdTokenRequest" + // }, + // "response": { + // "$ref": "GenerateIdTokenResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iamcredentials.projects.serviceAccounts.signBlob": + +type ProjectsServiceAccountsSignBlobCall struct { + s *Service + name string + signblobrequest *SignBlobRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SignBlob: Signs a blob using a service account's system-managed +// private key. +// +// - name: The resource name of the service account for which the +// credentials are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` +// wildcard character is required; replacing it with a project ID is +// invalid. +func (r *ProjectsServiceAccountsService) SignBlob(name string, signblobrequest *SignBlobRequest) *ProjectsServiceAccountsSignBlobCall { + c := &ProjectsServiceAccountsSignBlobCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.signblobrequest = signblobrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServiceAccountsSignBlobCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsSignBlobCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountsSignBlobCall) Context(ctx context.Context) *ProjectsServiceAccountsSignBlobCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signblobrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:signBlob") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iamcredentials.projects.serviceAccounts.signBlob" call. 
+// Exactly one of *SignBlobResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SignBlobResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) (*SignBlobResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SignBlobResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Signs a blob using a service account's system-managed private key.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", + // "httpMethod": "POST", + // "id": "iamcredentials.projects.serviceAccounts.signBlob", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:signBlob", + // "request": { + // "$ref": "SignBlobRequest" + // }, + // "response": { + // "$ref": "SignBlobResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iamcredentials.projects.serviceAccounts.signJwt": + +type ProjectsServiceAccountsSignJwtCall struct { + s *Service + name string + signjwtrequest *SignJwtRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SignJwt: Signs a JWT using a service account's system-managed private +// key. +// +// - name: The resource name of the service account for which the +// credentials are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` +// wildcard character is required; replacing it with a project ID is +// invalid. +func (r *ProjectsServiceAccountsService) SignJwt(name string, signjwtrequest *SignJwtRequest) *ProjectsServiceAccountsSignJwtCall { + c := &ProjectsServiceAccountsSignJwtCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.signjwtrequest = signjwtrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsServiceAccountsSignJwtCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsSignJwtCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountsSignJwtCall) Context(ctx context.Context) *ProjectsServiceAccountsSignJwtCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signjwtrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:signJwt") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iamcredentials.projects.serviceAccounts.signJwt" call. +// Exactly one of *SignJwtResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SignJwtResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (*SignJwtResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SignJwtResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Signs a JWT using a service account's system-managed private key.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", + // "httpMethod": "POST", + // "id": "iamcredentials.projects.serviceAccounts.signJwt", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:signJwt", + // "request": { + // "$ref": "SignJwtRequest" + // }, + // "response": { + // "$ref": "SignJwtResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/impersonate/doc.go b/vendor/google.golang.org/api/impersonate/doc.go new file mode 100644 index 0000000000000..155ef70e15a0e --- /dev/null +++ b/vendor/google.golang.org/api/impersonate/doc.go @@ -0,0 +1,32 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package impersonate is used to impersonate Google Credentials. +// +// # Required IAM roles +// +// In order to impersonate a service account the base service account must have +// the Service Account Token Creator role, roles/iam.serviceAccountTokenCreator, +// on the service account being impersonated. See +// https://cloud.google.com/iam/docs/understanding-service-accounts. +// +// Optionally, delegates can be used during impersonation if the base service +// account lacks the token creator role on the target. When using delegates, +// each service account must be granted roles/iam.serviceAccountTokenCreator +// on the next service account in the delgation chain. +// +// For example, if a base service account of SA1 is trying to impersonate target +// service account SA2 while using delegate service accounts DSA1 and DSA2, +// the following must be true: +// +// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on +// DSA1. +// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. +// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. +// +// If the base credential is an authorized user and not a service account, or if +// the option WithQuotaProject is set, the target service account must have a +// role that grants the serviceusage.services.use permission such as +// roles/serviceusage.serviceUsageConsumer. +package impersonate diff --git a/vendor/google.golang.org/api/impersonate/idtoken.go b/vendor/google.golang.org/api/impersonate/idtoken.go new file mode 100644 index 0000000000000..a2defff151854 --- /dev/null +++ b/vendor/google.golang.org/api/impersonate/idtoken.go @@ -0,0 +1,129 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "golang.org/x/oauth2" + "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" +) + +// IDTokenConfig for generating an impersonated ID token. +type IDTokenConfig struct { + // Audience is the `aud` field for the token, such as an API endpoint the + // token will grant access to. Required. + Audience string + // TargetPrincipal is the email address of the service account to + // impersonate. Required. + TargetPrincipal string + // IncludeEmail includes the service account's email in the token. The + // resulting token will include both an `email` and `email_verified` + // claim. + IncludeEmail bool + // Delegates are the service account email addresses in a delegation chain. 
+ // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. + Delegates []string +} + +// IDTokenSource creates an impersonated TokenSource that returns ID tokens +// configured with the provided config and using credentials loaded from +// Application Default Credentials as the base credentials. The tokens provided +// by the source are valid for one hour and are automatically refreshed. +func IDTokenSource(ctx context.Context, config IDTokenConfig, opts ...option.ClientOption) (oauth2.TokenSource, error) { + if config.Audience == "" { + return nil, fmt.Errorf("impersonate: an audience must be provided") + } + if config.TargetPrincipal == "" { + return nil, fmt.Errorf("impersonate: a target service account must be provided") + } + + clientOpts := append(defaultClientOptions(), opts...) + client, _, err := htransport.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + its := impersonatedIDTokenSource{ + client: client, + targetPrincipal: config.TargetPrincipal, + audience: config.Audience, + includeEmail: config.IncludeEmail, + } + for _, v := range config.Delegates { + its.delegates = append(its.delegates, formatIAMServiceAccountName(v)) + } + return oauth2.ReuseTokenSource(nil, its), nil +} + +type generateIDTokenRequest struct { + Audience string `json:"audience"` + IncludeEmail bool `json:"includeEmail"` + Delegates []string `json:"delegates,omitempty"` +} + +type generateIDTokenResponse struct { + Token string `json:"token"` +} + +type impersonatedIDTokenSource struct { + client *http.Client + + targetPrincipal string + audience string + includeEmail bool + delegates []string +} + +func (i impersonatedIDTokenSource) Token() (*oauth2.Token, error) { + now := time.Now() + genIDTokenReq := generateIDTokenRequest{ + Audience: i.audience, + IncludeEmail: i.includeEmail, + Delegates: i.delegates, + } + bodyBytes, err := json.Marshal(genIDTokenReq) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to marshal request: %v", err) + } + + url := fmt.Sprintf("%s/v1/%s:generateIdToken", iamCredentailsEndpoint, formatIAMServiceAccountName(i.targetPrincipal)) + req, err := http.NewRequest("POST", url, bytes.NewReader(bodyBytes)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %v", err) + } + req.Header.Set("Content-Type", "application/json") + resp, err := i.client.Do(req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate ID token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var generateIDTokenResp generateIDTokenResponse + if err := json.Unmarshal(body, &generateIDTokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) + } + return &oauth2.Token{ + AccessToken: generateIDTokenResp.Token, + // Generated ID tokens are good for one hour. + Expiry: now.Add(1 * time.Hour), + }, nil +} diff --git a/vendor/google.golang.org/api/impersonate/impersonate.go b/vendor/google.golang.org/api/impersonate/impersonate.go new file mode 100644 index 0000000000000..52c32589b723a --- /dev/null +++ b/vendor/google.golang.org/api/impersonate/impersonate.go @@ -0,0 +1,184 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
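As an illustration only (not part of the vendored files): the IDTokenSource added in idtoken.go above takes an IDTokenConfig and returns an oauth2.TokenSource whose tokens carry the generated OpenID Connect ID token in the AccessToken field, cached and refreshed roughly hourly. A minimal sketch, assuming placeholder audience and service-account values:

package main

import (
	"context"
	"log"

	"google.golang.org/api/impersonate"
)

func main() {
	ctx := context.Background()
	ts, err := impersonate.IDTokenSource(ctx, impersonate.IDTokenConfig{
		Audience:        "https://example-service.example.com/",           // placeholder audience
		TargetPrincipal: "target-sa@my-project.iam.gserviceaccount.com",   // placeholder service account
		IncludeEmail:    true,
	})
	if err != nil {
		log.Fatal(err)
	}
	tok, err := ts.Token()
	if err != nil {
		log.Fatal(err)
	}
	_ = tok.AccessToken // the ID token is returned in the AccessToken field
}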
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "golang.org/x/oauth2" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + htransport "google.golang.org/api/transport/http" +) + +var ( + iamCredentailsEndpoint = "https://iamcredentials.googleapis.com" + oauth2Endpoint = "https://oauth2.googleapis.com" +) + +// CredentialsConfig for generating impersonated credentials. +type CredentialsConfig struct { + // TargetPrincipal is the email address of the service account to + // impersonate. Required. + TargetPrincipal string + // Scopes that the impersonated credential should have. Required. + Scopes []string + // Delegates are the service account email addresses in a delegation chain. + // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. + Delegates []string + // Lifetime is the amount of time until the impersonated token expires. If + // unset the token's lifetime will be one hour and be automatically + // refreshed. If set the token may have a max lifetime of one hour and will + // not be refreshed. Service accounts that have been added to an org policy + // with constraints/iam.allowServiceAccountCredentialLifetimeExtension may + // request a token lifetime of up to 12 hours. Optional. + Lifetime time.Duration + // Subject is the sub field of a JWT. This field should only be set if you + // wish to impersonate as a user. This feature is useful when using domain + // wide delegation. Optional. + Subject string +} + +// defaultClientOptions ensures the base credentials will work with the IAM +// Credentials API if no scope or audience is set by the user. +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultAudience("https://iamcredentials.googleapis.com/"), + internaloption.WithDefaultScopes("https://www.googleapis.com/auth/cloud-platform"), + } +} + +// CredentialsTokenSource returns an impersonated CredentialsTokenSource configured with the provided +// config and using credentials loaded from Application Default Credentials as +// the base credentials. +func CredentialsTokenSource(ctx context.Context, config CredentialsConfig, opts ...option.ClientOption) (oauth2.TokenSource, error) { + if config.TargetPrincipal == "" { + return nil, fmt.Errorf("impersonate: a target service account must be provided") + } + if len(config.Scopes) == 0 { + return nil, fmt.Errorf("impersonate: scopes must be provided") + } + if config.Lifetime.Hours() > 12 { + return nil, fmt.Errorf("impersonate: max lifetime is 12 hours") + } + + var isStaticToken bool + // Default to the longest acceptable value of one hour as the token will + // be refreshed automatically if not set. + lifetime := 3600 * time.Second + if config.Lifetime != 0 { + lifetime = config.Lifetime + // Don't auto-refresh token if a lifetime is configured. + isStaticToken = true + } + + clientOpts := append(defaultClientOptions(), opts...) + client, _, err := htransport.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + // If a subject is specified a different auth-flow is initiated to + // impersonate as the provided subject (user). 
+ if config.Subject != "" { + return user(ctx, config, client, lifetime, isStaticToken) + } + + its := impersonatedTokenSource{ + client: client, + targetPrincipal: config.TargetPrincipal, + lifetime: fmt.Sprintf("%.fs", lifetime.Seconds()), + } + for _, v := range config.Delegates { + its.delegates = append(its.delegates, formatIAMServiceAccountName(v)) + } + its.scopes = make([]string, len(config.Scopes)) + copy(its.scopes, config.Scopes) + + if isStaticToken { + tok, err := its.Token() + if err != nil { + return nil, err + } + return oauth2.StaticTokenSource(tok), nil + } + return oauth2.ReuseTokenSource(nil, its), nil +} + +func formatIAMServiceAccountName(name string) string { + return fmt.Sprintf("projects/-/serviceAccounts/%s", name) +} + +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type generateAccessTokenResp struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +type impersonatedTokenSource struct { + client *http.Client + + targetPrincipal string + lifetime string + scopes []string + delegates []string +} + +// Token returns an impersonated Token. +func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { + reqBody := generateAccessTokenReq{ + Delegates: i.delegates, + Lifetime: i.lifetime, + Scope: i.scopes, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to marshal request: %v", err) + } + url := fmt.Sprintf("%s/v1/%s:generateAccessToken", iamCredentailsEndpoint, formatIAMServiceAccountName(i.targetPrincipal)) + req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %v", err) + } + req.Header.Set("Content-Type", "application/json") + + resp, err := i.client.Do(req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var accessTokenResp generateAccessTokenResp + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to parse expiry: %v", err) + } + return &oauth2.Token{ + AccessToken: accessTokenResp.AccessToken, + Expiry: expiry, + }, nil +} diff --git a/vendor/google.golang.org/api/impersonate/user.go b/vendor/google.golang.org/api/impersonate/user.go new file mode 100644 index 0000000000000..059deab71177e --- /dev/null +++ b/vendor/google.golang.org/api/impersonate/user.go @@ -0,0 +1,169 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
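For context, a minimal sketch of the access-token flow implemented by impersonate.go above, wired into a generated client through option.WithTokenSource; this snippet is illustrative rather than part of the vendored files, the service-account email is a placeholder, and the storage/v1 service is used only as an example consumer:

package main

import (
	"context"
	"log"

	"google.golang.org/api/impersonate"
	"google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	ts, err := impersonate.CredentialsTokenSource(ctx, impersonate.CredentialsConfig{
		TargetPrincipal: "target-sa@my-project.iam.gserviceaccount.com", // placeholder
		Scopes:          []string{"https://www.googleapis.com/auth/cloud-platform"},
		// Delegates, Subject and Lifetime are optional; with Lifetime unset the
		// token is valid for one hour and refreshed automatically.
	})
	if err != nil {
		log.Fatal(err)
	}
	svc, err := storage.NewService(ctx, option.WithTokenSource(ts))
	if err != nil {
		log.Fatal(err)
	}
	_ = svc // ready for use; this is the replacement for the deprecated option.ImpersonateCredentials
}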
+ +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2" +) + +func user(ctx context.Context, c CredentialsConfig, client *http.Client, lifetime time.Duration, isStaticToken bool) (oauth2.TokenSource, error) { + u := userTokenSource{ + client: client, + targetPrincipal: c.TargetPrincipal, + subject: c.Subject, + lifetime: lifetime, + } + u.delegates = make([]string, len(c.Delegates)) + for i, v := range c.Delegates { + u.delegates[i] = formatIAMServiceAccountName(v) + } + u.scopes = make([]string, len(c.Scopes)) + copy(u.scopes, c.Scopes) + if isStaticToken { + tok, err := u.Token() + if err != nil { + return nil, err + } + return oauth2.StaticTokenSource(tok), nil + } + return oauth2.ReuseTokenSource(nil, u), nil +} + +type claimSet struct { + Iss string `json:"iss"` + Scope string `json:"scope,omitempty"` + Sub string `json:"sub,omitempty"` + Aud string `json:"aud"` + Iat int64 `json:"iat"` + Exp int64 `json:"exp"` +} + +type signJWTRequest struct { + Payload string `json:"payload"` + Delegates []string `json:"delegates,omitempty"` +} + +type signJWTResponse struct { + // KeyID is the key used to sign the JWT. + KeyID string `json:"keyId"` + // SignedJwt contains the automatically generated header; the + // client-supplied payload; and the signature, which is generated using + // the key referenced by the `kid` field in the header. + SignedJWT string `json:"signedJwt"` +} + +type exchangeTokenResponse struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` +} + +type userTokenSource struct { + client *http.Client + + targetPrincipal string + subject string + scopes []string + lifetime time.Duration + delegates []string +} + +func (u userTokenSource) Token() (*oauth2.Token, error) { + signedJWT, err := u.signJWT() + if err != nil { + return nil, err + } + return u.exchangeToken(signedJWT) +} + +func (u userTokenSource) signJWT() (string, error) { + now := time.Now() + exp := now.Add(u.lifetime) + claims := claimSet{ + Iss: u.targetPrincipal, + Scope: strings.Join(u.scopes, " "), + Sub: u.subject, + Aud: fmt.Sprintf("%s/token", oauth2Endpoint), + Iat: now.Unix(), + Exp: exp.Unix(), + } + payloadBytes, err := json.Marshal(claims) + if err != nil { + return "", fmt.Errorf("impersonate: unable to marshal claims: %v", err) + } + signJWTReq := signJWTRequest{ + Payload: string(payloadBytes), + Delegates: u.delegates, + } + + bodyBytes, err := json.Marshal(signJWTReq) + if err != nil { + return "", fmt.Errorf("impersonate: unable to marshal request: %v", err) + } + reqURL := fmt.Sprintf("%s/v1/%s:signJwt", iamCredentailsEndpoint, formatIAMServiceAccountName(u.targetPrincipal)) + req, err := http.NewRequest("POST", reqURL, bytes.NewReader(bodyBytes)) + if err != nil { + return "", fmt.Errorf("impersonate: unable to create request: %v", err) + } + req.Header.Set("Content-Type", "application/json") + rawResp, err := u.client.Do(req) + if err != nil { + return "", fmt.Errorf("impersonate: unable to sign JWT: %v", err) + } + body, err := ioutil.ReadAll(io.LimitReader(rawResp.Body, 1<<20)) + if err != nil { + return "", fmt.Errorf("impersonate: unable to read body: %v", err) + } + if c := rawResp.StatusCode; c < 200 || c > 299 { + return "", fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var signJWTResp signJWTResponse + if err := json.Unmarshal(body, &signJWTResp); err != nil { + return "", 
fmt.Errorf("impersonate: unable to parse response: %v", err) + } + return signJWTResp.SignedJWT, nil +} + +func (u userTokenSource) exchangeToken(signedJWT string) (*oauth2.Token, error) { + now := time.Now() + v := url.Values{} + v.Set("grant_type", "assertion") + v.Set("assertion_type", "http://oauth.net/grant_type/jwt/1.0/bearer") + v.Set("assertion", signedJWT) + rawResp, err := u.client.PostForm(fmt.Sprintf("%s/token", oauth2Endpoint), v) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to exchange token: %v", err) + } + body, err := ioutil.ReadAll(io.LimitReader(rawResp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to read body: %v", err) + } + if c := rawResp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var tokenResp exchangeTokenResponse + if err := json.Unmarshal(body, &tokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) + } + + return &oauth2.Token{ + AccessToken: tokenResp.AccessToken, + TokenType: tokenResp.TokenType, + Expiry: now.Add(time.Second * time.Duration(tokenResp.ExpiresIn)), + }, nil +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index b067a179b7a88..32d52413b30ca 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -70,11 +70,12 @@ const ( // // - A self-signed JWT flow will be executed if the following conditions are // met: -// (1) At least one of the following is true: -// (a) No scope is provided -// (b) Scope for self-signed JWT flow is enabled -// (c) Audiences are explicitly provided by users -// (2) No service account impersontation +// +// (1) At least one of the following is true: +// (a) No scope is provided +// (b) Scope for self-signed JWT flow is enabled +// (c) Audiences are explicitly provided by users +// (2) No service account impersontation // // - Otherwise, executes standard OAuth 2.0 flow // More details: google.aip.dev/auth/4111 diff --git a/vendor/google.golang.org/api/internal/gensupport/error.go b/vendor/google.golang.org/api/internal/gensupport/error.go new file mode 100644 index 0000000000000..886c6532b1531 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/error.go @@ -0,0 +1,24 @@ +// Copyright 2022 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "errors" + + "github.com/googleapis/gax-go/v2/apierror" + "google.golang.org/api/googleapi" +) + +// WrapError creates an [apierror.APIError] from err, wraps it in err, and +// returns err. If err is not a [googleapi.Error] (or a +// [google.golang.org/grpc/status.Status]), it returns err without modification. +func WrapError(err error) error { + var herr *googleapi.Error + apiError, ok := apierror.ParseError(err, false) + if ok && errors.As(err, &herr) { + herr.Wrap(apiError) + } + return err +} diff --git a/vendor/google.golang.org/api/internal/gensupport/json.go b/vendor/google.golang.org/api/internal/gensupport/json.go index c01e32189f44b..eab49a11eb184 100644 --- a/vendor/google.golang.org/api/internal/gensupport/json.go +++ b/vendor/google.golang.org/api/internal/gensupport/json.go @@ -13,9 +13,10 @@ import ( // MarshalJSON returns a JSON encoding of schema containing only selected fields. 
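The new gensupport.WrapError above attaches an *apierror.APIError to any *googleapi.Error returned by the generated Do methods (see the storage-gen.go hunks later in this diff), so structured error details can be recovered with errors.As. A rough sketch, assuming the standard generated surface of the iamcredentials/v1 package (NewService, Projects.ServiceAccounts, the GenerateAccessTokenRequest Scope field), which is not itself shown in this hunk:

package main

import (
	"context"
	"errors"
	"log"

	"github.com/googleapis/gax-go/v2/apierror"
	iamcredentials "google.golang.org/api/iamcredentials/v1"
)

func main() {
	ctx := context.Background()
	svc, err := iamcredentials.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	name := "projects/-/serviceAccounts/target-sa@my-project.iam.gserviceaccount.com" // placeholder
	req := &iamcredentials.GenerateAccessTokenRequest{
		Scope: []string{"https://www.googleapis.com/auth/cloud-platform"},
	}
	if _, err := svc.Projects.ServiceAccounts.GenerateAccessToken(name, req).Do(); err != nil {
		var ae *apierror.APIError
		if errors.As(err, &ae) {
			// Reason and Details are populated when the service attached an
			// ErrorInfo to the error response.
			log.Printf("reason=%q details=%v", ae.Reason(), ae.Details())
		}
		log.Fatal(err)
	}
}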
// A field is selected if any of the following is true: -// * it has a non-empty value -// * its field name is present in forceSendFields and it is not a nil pointer or nil interface -// * its field name is present in nullFields. +// - it has a non-empty value +// - its field name is present in forceSendFields and it is not a nil pointer or nil interface +// - its field name is present in nullFields. +// // The JSON key for each selected field is taken from the field's json: struct tag. func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { if len(forceSendFields) == 0 && len(nullFields) == 0 { @@ -85,7 +86,12 @@ func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNu if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil { ms, ok := v.Interface().(map[string]string) if !ok { - return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name) + mi, err := initMapSlow(v, f.Name, useNullMaps) + if err != nil { + return nil, err + } + m[tag.apiName] = mi + continue } mi := map[string]interface{}{} for k, v := range ms { @@ -119,6 +125,25 @@ func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNu return m, nil } +// initMapSlow uses reflection to build up a map object. This is slower than +// the default behavior so it should be used only as a fallback. +func initMapSlow(rv reflect.Value, fieldName string, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) { + mi := map[string]interface{}{} + iter := rv.MapRange() + for iter.Next() { + k, ok := iter.Key().Interface().(string) + if !ok { + return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]any", fieldName) + } + v := iter.Value().Interface() + mi[k] = v + } + for k := range useNullMaps[fieldName] { + mi[k] = nil + } + return mi, nil +} + // formatAsString returns a string representation of v, dereferencing it first if possible. func formatAsString(v reflect.Value, kind reflect.Kind) string { if kind == reflect.Ptr && !v.IsNil() { diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go index d14a22470c19c..8356e7f27b07c 100644 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -17,92 +17,10 @@ import ( "sync" "time" + gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/googleapi" ) -const sniffBuffSize = 512 - -func newContentSniffer(r io.Reader) *contentSniffer { - return &contentSniffer{r: r} -} - -// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. -type contentSniffer struct { - r io.Reader - start []byte // buffer for the sniffed bytes. - err error // set to any error encountered while reading bytes to be sniffed. - - ctype string // set on first sniff. - sniffed bool // set to true on first sniff. -} - -func (cs *contentSniffer) Read(p []byte) (n int, err error) { - // Ensure that the content type is sniffed before any data is consumed from Reader. - _, _ = cs.ContentType() - - if len(cs.start) > 0 { - n := copy(p, cs.start) - cs.start = cs.start[n:] - return n, nil - } - - // We may have read some bytes into start while sniffing, even if the read ended in an error. - // We should first return those bytes, then the error. - if cs.err != nil { - return 0, cs.err - } - - // Now we have handled all bytes that were buffered while sniffing. 
Now just delegate to the underlying reader. - return cs.r.Read(p) -} - -// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. -func (cs *contentSniffer) ContentType() (string, bool) { - if cs.sniffed { - return cs.ctype, cs.ctype != "" - } - cs.sniffed = true - // If ReadAll hits EOF, it returns err==nil. - cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) - - // Don't try to detect the content type based on possibly incomplete data. - if cs.err != nil { - return "", false - } - - cs.ctype = http.DetectContentType(cs.start) - return cs.ctype, true -} - -// DetermineContentType determines the content type of the supplied reader. -// If the content type is already known, it can be specified via ctype. -// Otherwise, the content of media will be sniffed to determine the content type. -// If media implements googleapi.ContentTyper (deprecated), this will be used -// instead of sniffing the content. -// After calling DetectContentType the caller must not perform further reads on -// media, but rather read from the Reader that is returned. -func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { - // Note: callers could avoid calling DetectContentType if ctype != "", - // but doing the check inside this function reduces the amount of - // generated code. - if ctype != "" { - return media, ctype - } - - // For backwards compatibility, allow clients to set content - // type by providing a ContentTyper for media. - if typer, ok := media.(googleapi.ContentTyper); ok { - return media, typer.ContentType() - } - - sniffer := newContentSniffer(media) - if ctype, ok := sniffer.ContentType(); ok { - return sniffer, ctype - } - // If content type could not be sniffed, reads from sniffer will eventually fail with an error. - return sniffer, "" -} - type typeReader struct { io.Reader typ string @@ -234,7 +152,10 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { mi := &MediaInfo{} opts := googleapi.ProcessMediaOptions(options) if !opts.ForceEmptyContentType { - r, mi.mType = DetermineContentType(r, opts.ContentType) + mi.mType = opts.ContentType + if mi.mType == "" { + r, mi.mType = gax.DetermineContentType(r) + } } mi.chunkRetryDeadline = opts.ChunkRetryDeadline mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) @@ -245,7 +166,11 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { // call. It returns a MediaInfo using the given reader, size and media type. func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { rdr := ReaderAtToReader(r, size) - rdr, mType := DetermineContentType(rdr, mediaType) + mType := mediaType + if mType == "" { + rdr, mType = gax.DetermineContentType(rdr) + } + return &MediaInfo{ size: size, mType: mType, @@ -289,13 +214,12 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB // be retried because the data is stored in the MediaBuffer. 
media, _, _, _ = mi.buffer.Chunk() } + toCleanup := []io.Closer{} if media != nil { fb := readerFunc(body) fm := readerFunc(media) combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) - toCleanup := []io.Closer{ - combined, - } + toCleanup = append(toCleanup, combined) if fb != nil && fm != nil { getBody = func() (io.ReadCloser, error) { rb := ioutil.NopCloser(fb()) @@ -309,18 +233,30 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB return r, nil } } - cleanup = func() { - for _, closer := range toCleanup { - _ = closer.Close() - } - - } reqHeaders.Set("Content-Type", ctype) body = combined } if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { + // This happens when initiating a resumable upload session. + // The initial request contains a JSON body rather than media. + // It can be retried with a getBody function that re-creates the request body. + fb := readerFunc(body) + if fb != nil { + getBody = func() (io.ReadCloser, error) { + rb := ioutil.NopCloser(fb()) + toCleanup = append(toCleanup, rb) + return rb, nil + } + } reqHeaders.Set("X-Upload-Content-Type", mi.mType) } + // Ensure that any bodies created in getBody are cleaned up. + cleanup = func() { + for _, closer := range toCleanup { + _ = closer.Close() + } + + } return body, getBody, cleanup } diff --git a/vendor/google.golang.org/api/internal/gensupport/params.go b/vendor/google.golang.org/api/internal/gensupport/params.go index 6703721ffde57..1a30d2ca25241 100644 --- a/vendor/google.golang.org/api/internal/gensupport/params.go +++ b/vendor/google.golang.org/api/internal/gensupport/params.go @@ -37,7 +37,7 @@ func (u URLParams) SetMulti(key string, values []string) { u[key] = values } -// Encode encodes the values into ``URL encoded'' form +// Encode encodes the values into “URL encoded” form // ("bar=baz&foo=quux") sorted by key. func (u URLParams) Encode() string { return url.Values(u).Encode() diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index 0eae147fa92fb..0c659188dda18 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -10,8 +10,12 @@ import ( "fmt" "io" "net/http" + "strings" "sync" "time" + + "github.com/google/uuid" + "google.golang.org/api/internal" ) // ResumableUpload is used by the generated APIs to provide resumable uploads. @@ -38,6 +42,11 @@ type ResumableUpload struct { // ChunkRetryDeadline configures the per-chunk deadline after which no further // retries should happen. ChunkRetryDeadline time.Duration + + // Track current request invocation ID and attempt count for retry metric + // headers. + invocationID string + attempts int } // Progress returns the number of bytes uploaded at this point. @@ -72,6 +81,10 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) + baseXGoogHeader := "gl-go/" + GoVersion() + " gdcl/" + internal.Version + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", rx.invocationID, rx.attempts) + req.Header.Set("X-Goog-Api-Client", strings.Join([]string{baseXGoogHeader, invocationHeader}, " ")) + // Google's upload endpoint uses status code 308 for a // different purpose than the "308 Permanent Redirect" // since-standardized in RFC 7238. 
Because of the conflict in @@ -178,9 +191,11 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err for { var pause time.Duration - // Each chunk gets its own initialized-at-zero backoff. + // Each chunk gets its own initialized-at-zero backoff and invocation ID. bo := rx.Retry.backoff() quitAfter := time.After(retryDeadline) + rx.attempts = 1 + rx.invocationID = uuid.New().String() // Retry loop for a single chunk. for { @@ -223,6 +238,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err break } + rx.attempts++ pause = bo.Pause() if resp != nil && resp.Body != nil { resp.Body.Close() diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index dd50cc20a58c6..dd24139b36664 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -8,12 +8,36 @@ import ( "context" "encoding/json" "errors" + "fmt" "net/http" + "strings" "time" + "github.com/google/uuid" "github.com/googleapis/gax-go/v2" ) +// Use this error type to return an error which allows introspection of both +// the context error and the error from the service. +type wrappedCallErr struct { + ctxErr error + wrappedErr error +} + +func (e wrappedCallErr) Error() string { + return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr) +} + +func (e wrappedCallErr) Unwrap() error { + return e.wrappedErr +} + +// Is allows errors.Is to match the error from the call as well as context +// sentinel errors. +func (e wrappedCallErr) Is(target error) bool { + return errors.Is(e.ctxErr, target) || errors.Is(e.wrappedErr, target) +} + // SendRequest sends a single HTTP request using the given client. // If ctx is non-nil, it calls all hooks, then sends the request with // req.WithContext, then calls any functions returned by the hooks in @@ -71,6 +95,9 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r var resp *http.Response var err error + attempts := 1 + invocationID := uuid.New().String() + baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") // Loop to retry the request, up to the context deadline. var pause time.Duration @@ -90,12 +117,12 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r for { select { case <-ctx.Done(): - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err == nil { - err = ctx.Err() + // If we got an error and the context has been canceled, return an error acknowledging + // both the context cancelation and the service error. + if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} } - return resp, err + return resp, ctx.Err() case <-time.After(pause): } @@ -104,11 +131,14 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r // select is satisfied at the same time, Go will choose one arbitrarily. // That can cause an operation to go through even if the context was // canceled before. 
- if err == nil { - err = ctx.Err() + if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} } - return resp, err + return resp, ctx.Err() } + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") + req.Header.Set("X-Goog-Api-Client", xGoogHeader) resp, err = client.Do(req.WithContext(ctx)) @@ -123,6 +153,7 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r if req.GetBody == nil || !errorFunc(status, err) { break } + attempts++ var errBody error req.Body, errBody = req.GetBody() if errBody != nil { diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 6b00f4d472188..db10d4bc9f241 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.83.0" +const Version = "0.106.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index 343a5a965ebc9..cc7ebfe277bf6 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -134,3 +134,10 @@ type withCreds google.Credentials func (w *withCreds) Apply(o *internal.DialSettings) { o.InternalCredentials = (*google.Credentials)(w) } + +// EmbeddableAdapter is a no-op option.ClientOption that allow libraries to +// create their own client options by embedding this type into their own +// client-specific option wrapper. See example for usage. +type EmbeddableAdapter struct{} + +func (*EmbeddableAdapter) Apply(_ *internal.DialSettings) {} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index 60743c63e2d14..b2085a1949abf 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -82,6 +82,9 @@ func (w withEndpoint) Apply(o *internal.DialSettings) { // WithScopes returns a ClientOption that overrides the default OAuth2 scopes // to be used for a service. +// +// If both WithScopes and WithTokenSource are used, scope settings from the +// token source will be used instead. func WithScopes(scope ...string) ClientOption { return withScopes(scope) } @@ -93,7 +96,9 @@ func (w withScopes) Apply(o *internal.DialSettings) { copy(o.Scopes, w) } -// WithUserAgent returns a ClientOption that sets the User-Agent. +// WithUserAgent returns a ClientOption that sets the User-Agent. This option +// is incompatible with the [WithHTTPClient] option. If you wish to provide a +// custom client you will need to add this header via RoundTripper middleware. func WithUserAgent(ua string) ClientOption { return withUA(ua) } @@ -287,10 +292,10 @@ func (w withClientCertSource) Apply(o *internal.DialSettings) { // service account SA2 while using delegate service accounts DSA1 and DSA2, // the following must be true: // -// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on -// DSA1. -// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. -// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. +// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on +// DSA1. +// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. +// 3. 
DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. // // The resulting impersonated credential will either have the default scopes of // the client being instantiating or the scopes from WithScopes if provided. @@ -305,9 +310,9 @@ func (w withClientCertSource) Apply(o *internal.DialSettings) { // // This is an EXPERIMENTAL API and may be changed or removed in the future. // -// This option has been replaced by `impersonate` package: +// Deprecated: This option has been replaced by `impersonate` package: // `google.golang.org/api/impersonate`. Please use the `impersonate` package -// instead. +// instead with the WithTokenSource option. func ImpersonateCredentials(target string, delegates ...string) ClientOption { return impersonateServiceAccount{ target: target, diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 8b0a8f87d9da9..bc1fca74391e5 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"3135383434363131313530373135383336353335\"", + "etag": "\"3134363638303431303535363634343235383633\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -3005,7 +3005,7 @@ } } }, - "revision": "20220509", + "revision": "20220705", "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { @@ -3084,6 +3084,19 @@ }, "type": "array" }, + "customPlacementConfig": { + "description": "The bucket's custom placement configuration for Custom Dual Regions.", + "properties": { + "dataLocations": { + "description": "The list of regional locations in which data is placed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "defaultEventBasedHold": { "description": "The default value for event-based hold on newly created objects in this bucket. Event-based hold is a way to retain objects indefinitely until an event occurs, signified by the hold's release. After being released, such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false. Objects under event-based hold cannot be deleted, overwritten or archived until the hold is removed.", "type": "boolean" @@ -3181,7 +3194,7 @@ "type": "string" }, "type": { - "description": "Type of the action. Currently, only Delete and SetStorageClass are supported.", + "description": "Type of the action. Currently, only Delete, SetStorageClass, and AbortIncompleteMultipartUpload are supported.", "type": "string" } }, @@ -3982,7 +3995,7 @@ "type": "string" }, "updated": { - "description": "The modification time of the object metadata in RFC 3339 format.", + "description": "The modification time of the object metadata in RFC 3339 format. 
Set initially to object creation time and then updated whenever any metadata of the object changes. This includes changes made by a requester, such as modifying custom metadata, as well as changes made by Cloud Storage on behalf of a requester, such as changing the storage class based on an Object Lifecycle Configuration.", "format": "date-time", "type": "string" } diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 8ef42720013f9..4613ebdfa72aa 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -10,35 +10,35 @@ // // For product documentation, see: https://developers.google.com/storage/docs/json_api/ // -// Creating a client +// # Creating a client // // Usage example: // -// import "google.golang.org/api/storage/v1" -// ... -// ctx := context.Background() -// storageService, err := storage.NewService(ctx) +// import "google.golang.org/api/storage/v1" +// ... +// ctx := context.Background() +// storageService, err := storage.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // -// Other authentication options +// # Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // -// storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope)) +// storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // -// storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza...")) +// storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package storage // import "google.golang.org/api/storage/v1" @@ -291,6 +291,10 @@ type Bucket struct { // configuration. Cors []*BucketCors `json:"cors,omitempty"` + // CustomPlacementConfig: The bucket's custom placement configuration + // for Custom Dual Regions. + CustomPlacementConfig *BucketCustomPlacementConfig `json:"customPlacementConfig,omitempty"` + // DefaultEventBasedHold: The default value for event-based hold on // newly created objects in this bucket. Event-based hold is a way to // retain objects indefinitely until an event occurs, signified by the @@ -538,6 +542,36 @@ func (s *BucketCors) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BucketCustomPlacementConfig: The bucket's custom placement +// configuration for Custom Dual Regions. 
+type BucketCustomPlacementConfig struct { + // DataLocations: The list of regional locations in which data is + // placed. + DataLocations []string `json:"dataLocations,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DataLocations") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DataLocations") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketCustomPlacementConfig) MarshalJSON() ([]byte, error) { + type NoMethod BucketCustomPlacementConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BucketEncryption: Encryption configuration for a bucket. type BucketEncryption struct { // DefaultKmsKeyName: A Cloud KMS key that will be used to encrypt @@ -756,8 +790,8 @@ type BucketLifecycleRuleAction struct { // action is SetStorageClass. StorageClass string `json:"storageClass,omitempty"` - // Type: Type of the action. Currently, only Delete and SetStorageClass - // are supported. + // Type: Type of the action. Currently, only Delete, SetStorageClass, + // and AbortIncompleteMultipartUpload are supported. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "StorageClass") to @@ -788,7 +822,7 @@ func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { type BucketLifecycleRuleCondition struct { // Age: Age of an object (in days). This condition is satisfied when an // object reaches the specified age. - Age int64 `json:"age,omitempty"` + Age *int64 `json:"age,omitempty"` // CreatedBefore: A date in RFC 3339 format with only the date part (for // instance, "2013-01-15"). This condition is satisfied when an object @@ -1855,7 +1889,12 @@ type Object struct { TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"` // Updated: The modification time of the object metadata in RFC 3339 - // format. + // format. Set initially to object creation time and then updated + // whenever any metadata of the object changes. This includes changes + // made by a requester, such as modifying custom metadata, as well as + // changes made by Cloud Storage on behalf of a requester, such as + // changing the storage class based on an Object Lifecycle + // Configuration. Updated string `json:"updated,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2445,10 +2484,10 @@ type BucketAccessControlsDeleteCall struct { // Delete: Permanently deletes the ACL entry for the specified entity on // the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. 
Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -2521,7 +2560,7 @@ func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -2575,10 +2614,10 @@ type BucketAccessControlsGetCall struct { // Get: Returns the ACL entry for the specified entity on the specified // bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -2669,17 +2708,17 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -2826,17 +2865,17 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -2986,17 +3025,17 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControls{ ServerResponse: googleapi.ServerResponse{ @@ -3055,10 +3094,10 @@ type BucketAccessControlsPatchCall struct { // Patch: Patches an ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
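The repeated change from `return nil, err` to `return nil, gensupport.WrapError(err)` affects how callers inspect failures. Assuming the wrapper keeps the original *googleapi.Error reachable through error unwrapping (which is how the newer gensupport is expected to behave), status-code checks like the following sketch should continue to work.

import (
	"errors"
	"net/http"

	"google.golang.org/api/googleapi"
)

// isNotFound reports whether an error returned from a *Call.Do() is an HTTP 404.
func isNotFound(err error) bool {
	var gErr *googleapi.Error
	if errors.As(err, &gErr) {
		return gErr.Code == http.StatusNotFound
	}
	return false
}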
func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -3142,17 +3181,17 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -3221,10 +3260,10 @@ type BucketAccessControlsUpdateCall struct { // Update: Updates an ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -3308,17 +3347,17 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -3472,7 +3511,7 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -3559,8 +3598,9 @@ func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64 // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. 
func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { c.urlParams_.Set("projection", projection) return c @@ -3648,17 +3688,17 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -3843,17 +3883,17 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -3929,14 +3969,22 @@ func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsert // predefined set of access controls to this bucket. // // Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and +// +// "authenticatedRead" - Project team owners get OWNER access, and +// // allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// // their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// // get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and +// +// "publicReadWrite" - Project team owners get OWNER access, and +// // allUsers get WRITER access. func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -3948,16 +3996,26 @@ func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCa // object access controls to this bucket. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. 
func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) @@ -3970,8 +4028,9 @@ func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAc // full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { c.urlParams_.Set("projection", projection) return c @@ -4048,17 +4107,17 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -4206,8 +4265,9 @@ func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsListCall) Projection(projection string) *BucketsListCall { c.urlParams_.Set("projection", projection) return c @@ -4292,17 +4352,17 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Buckets{ ServerResponse: googleapi.ServerResponse{ @@ -4414,9 +4474,9 @@ type BucketsLockRetentionPolicyCall struct { // LockRetentionPolicy: Locks retention policy on a bucket. // -// - bucket: Name of a bucket. -// - ifMetagenerationMatch: Makes the operation conditional on whether -// bucket's current metageneration matches the given value. +// - bucket: Name of a bucket. +// - ifMetagenerationMatch: Makes the operation conditional on whether +// bucket's current metageneration matches the given value. func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall { c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -4493,17 +4553,17 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -4602,14 +4662,22 @@ func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int // predefined set of access controls to this bucket. 
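A short sketch tying together the reflowed PredefinedAcl, PredefinedDefaultObjectAcl, and Projection parameter docs on Buckets.Insert; the values are taken from the enumerations above, and the bucket name is a placeholder.

func createPrivateBucket(svc *storage.Service, project string) (*storage.Bucket, error) {
	return svc.Buckets.Insert(project, &storage.Bucket{Name: "example-private-bucket"}).
		PredefinedAcl("private").                     // project team owners get OWNER access
		PredefinedDefaultObjectAcl("projectPrivate"). // new objects get role-based access
		Projection("noAcl").                          // omit owner, acl, defaultObjectAcl in the response
		Do()
}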
// // Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and +// +// "authenticatedRead" - Project team owners get OWNER access, and +// // allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// // their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// // get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and +// +// "publicReadWrite" - Project team owners get OWNER access, and +// // allUsers get WRITER access. func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -4621,16 +4689,26 @@ func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall // object access controls to this bucket. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) @@ -4641,8 +4719,9 @@ func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl // properties to return. Defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. 
func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { c.urlParams_.Set("projection", projection) return c @@ -4722,17 +4801,17 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -4940,17 +5019,17 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -5103,17 +5182,17 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5214,14 +5293,22 @@ func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // predefined set of access controls to this bucket. // // Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and +// +// "authenticatedRead" - Project team owners get OWNER access, and +// // allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// // their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// // get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and +// +// "publicReadWrite" - Project team owners get OWNER access, and +// // allUsers get WRITER access. func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -5233,16 +5320,26 @@ func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCa // object access controls to this bucket. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. 
-// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) @@ -5253,8 +5350,9 @@ func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAc // properties to return. Defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { c.urlParams_.Set("projection", projection) return c @@ -5334,17 +5432,17 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -5533,7 +5631,7 @@ func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -5570,10 +5668,10 @@ type DefaultObjectAccessControlsDeleteCall struct { // Delete: Permanently deletes the default object ACL entry for the // specified entity on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -5646,7 +5744,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -5700,10 +5798,10 @@ type DefaultObjectAccessControlsGetCall struct { // Get: Returns the default object ACL entry for the specified entity on // the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. 
Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -5794,17 +5892,17 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -5952,17 +6050,17 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6129,17 +6227,17 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControls{ ServerResponse: googleapi.ServerResponse{ @@ -6210,10 +6308,10 @@ type DefaultObjectAccessControlsPatchCall struct { // Patch: Patches a default object ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -6297,17 +6395,17 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6376,10 +6474,10 @@ type DefaultObjectAccessControlsUpdateCall struct { // Update: Updates a default object ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. 
Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -6463,17 +6561,17 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6615,7 +6713,7 @@ func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -6761,17 +6859,17 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Notification{ ServerResponse: googleapi.ServerResponse{ @@ -6921,17 +7019,17 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Notification{ ServerResponse: googleapi.ServerResponse{ @@ -7083,17 +7181,17 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Notifications{ ServerResponse: googleapi.ServerResponse{ @@ -7156,12 +7254,12 @@ type ObjectAccessControlsDeleteCall struct { // Delete: Permanently deletes the ACL entry for the specified entity on // the specified object. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. 
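For the Notifications calls touched above, a sketch of registering a Pub/Sub notification configuration on a bucket; the topic resource-name format, payload format, and event type shown are illustrative assumptions, not taken from this diff.

func addNotification(svc *storage.Service, bucket, project, topic string) (*storage.Notification, error) {
	n := &storage.Notification{
		// Assumed topic format: //pubsub.googleapis.com/projects/<project>/topics/<topic>
		Topic:         "//pubsub.googleapis.com/projects/" + project + "/topics/" + topic,
		PayloadFormat: "JSON_API_V1",
		EventTypes:    []string{"OBJECT_FINALIZE"},
	}
	return svc.Notifications.Insert(bucket, n).Do()
}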
Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7244,7 +7342,7 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -7312,12 +7410,12 @@ type ObjectAccessControlsGetCall struct { // Get: Returns the ACL entry for the specified entity on the specified // object. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7418,17 +7516,17 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -7507,9 +7605,9 @@ type ObjectAccessControlsInsertCall struct { // Insert: Creates a new ACL entry on the specified object. // -// - bucket: Name of a bucket. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. 
func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7601,17 +7699,17 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -7686,9 +7784,9 @@ type ObjectAccessControlsListCall struct { // List: Retrieves ACL entries on the specified object. // -// - bucket: Name of a bucket. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7787,17 +7885,17 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControls{ ServerResponse: googleapi.ServerResponse{ @@ -7870,12 +7968,12 @@ type ObjectAccessControlsPatchCall struct { // Patch: Patches an ACL entry on the specified object. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. 
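The ObjectAccessControls doc comments above enumerate the accepted entity forms; a sketch of granting a single user read access to one object, with the email address supplied by the caller.

func grantObjectReader(svc *storage.Service, bucket, object, email string) (*storage.ObjectAccessControl, error) {
	acl := &storage.ObjectAccessControl{
		Entity: "user-" + email, // one of the entity forms listed in the doc comment
		Role:   "READER",
	}
	return svc.ObjectAccessControls.Insert(bucket, object, acl).Do()
}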
func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7969,17 +8067,17 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -8062,12 +8160,12 @@ type ObjectAccessControlsUpdateCall struct { // Update: Updates an ACL entry on the specified object. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8161,17 +8259,17 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -8254,11 +8352,11 @@ type ObjectsComposeCall struct { // Compose: Concatenates a list of existing objects into a new object in // the same bucket. // -// - destinationBucket: Name of the bucket containing the source -// objects. The destination object is stored in this bucket. -// - destinationObject: Name of the new object. For information about -// how to URL encode object names to be path safe, see Encoding URI -// Path Parts. +// - destinationBucket: Name of the bucket containing the source +// objects. The destination object is stored in this bucket. +// - destinationObject: Name of the new object. For information about +// how to URL encode object names to be path safe, see Encoding URI +// Path Parts. func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.destinationBucket = destinationBucket @@ -8272,16 +8370,26 @@ func (r *ObjectsService) Compose(destinationBucket string, destinationObject str // to the destination object. 
// // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) @@ -8308,7 +8416,9 @@ func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) // KmsKeyName sets the optional parameter "kmsKeyName": Resource name of // the Cloud KMS key, of the form // projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object +// +// that will be used to encrypt the object. Overrides the object +// // metadata's kms_key_name value, if any. func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { c.urlParams_.Set("kmsKeyName", kmsKeyName) @@ -8390,17 +8500,17 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -8511,18 +8621,18 @@ type ObjectsCopyCall struct { // Copy: Copies a source object to a destination object. Optionally // overrides metadata. // -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any.For information about how to URL encode object names to be path -// safe, see Encoding URI Path Parts. -// - destinationObject: Name of the new object. Required when the object -// metadata is not otherwise provided. Overrides the object metadata's -// name value, if any. -// - sourceBucket: Name of the bucket in which to find the source -// object. -// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts. +// - destinationBucket: Name of the bucket in which to store the new +// object. Overrides the provided object metadata's bucket value, if +// any.For information about how to URL encode object names to be path +// safe, see Encoding URI Path Parts. +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's +// name value, if any. 
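Objects.Compose, whose DestinationPredefinedAcl and KmsKeyName parameter docs are reflowed above, concatenates existing objects within a single bucket. A hedged sketch; the object names and ACL value are placeholders.

func composeObjects(svc *storage.Service, bucket, dst string, sources []string) (*storage.Object, error) {
	req := &storage.ComposeRequest{Destination: &storage.Object{Name: dst}}
	for _, s := range sources {
		req.SourceObjects = append(req.SourceObjects,
			&storage.ComposeRequestSourceObjects{Name: s})
	}
	return svc.Objects.Compose(bucket, dst, req).
		DestinationPredefinedAcl("projectPrivate").
		Do()
}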
+// - sourceBucket: Name of the bucket in which to find the source +// object. +// - sourceObject: Name of the source object. For information about how +// to URL encode object names to be path safe, see Encoding URI Path +// Parts. func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -8537,7 +8647,9 @@ func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinat // "destinationKmsKeyName": Resource name of the Cloud KMS key, of the // form // projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object +// +// that will be used to encrypt the object. Overrides the object +// // metadata's kms_key_name value, if any. func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsCopyCall { c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) @@ -8549,16 +8661,26 @@ func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *O // to the destination object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) @@ -8643,8 +8765,9 @@ func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationN // specifies the acl property, when it defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. 
func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { c.urlParams_.Set("projection", projection) return c @@ -8735,17 +8858,17 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -8923,9 +9046,9 @@ type ObjectsDeleteCall struct { // if versioning is not enabled for the bucket, or if the generation // parameter is used. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9042,7 +9165,7 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -9126,9 +9249,9 @@ type ObjectsGetCall struct { // Get: Retrieves an object or its metadata. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9184,8 +9307,9 @@ func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64 // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. 
func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { c.urlParams_.Set("projection", projection) return c @@ -9271,7 +9395,7 @@ func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, } if err := googleapi.CheckMediaResponse(res); err != nil { res.Body.Close() - return nil, err + return nil, gensupport.WrapError(err) } return res, nil } @@ -9290,17 +9414,17 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -9414,9 +9538,9 @@ type ObjectsGetIamPolicyCall struct { // GetIamPolicy: Returns an IAM policy for the specified object. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9515,17 +9639,17 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -9601,8 +9725,8 @@ type ObjectsInsertCall struct { // Insert: Stores a new object and metadata. // -// - bucket: Name of the bucket in which to store the new object. -// Overrides the provided object metadata's bucket value, if any. +// - bucket: Name of the bucket in which to store the new object. +// Overrides the provided object metadata's bucket value, if any. func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9660,7 +9784,9 @@ func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // KmsKeyName sets the optional parameter "kmsKeyName": Resource name of // the Cloud KMS key, of the form // projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object +// +// that will be used to encrypt the object. Overrides the object +// // metadata's kms_key_name value, if any. func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { c.urlParams_.Set("kmsKeyName", kmsKeyName) @@ -9680,16 +9806,26 @@ func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { // predefined set of access controls to this object. 
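Objects.Get serves both metadata (Do) and media (Download); with this change, media-endpoint failures also pass through gensupport.WrapError. A sketch of reading an object's content, assuming the "io" import.

func readObject(svc *storage.Service, bucket, name string) ([]byte, error) {
	res, err := svc.Objects.Get(bucket, name).Download()
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	return io.ReadAll(res.Body)
}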
// // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -9701,8 +9837,9 @@ func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCa // specifies the acl property, when it defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { c.urlParams_.Set("projection", projection) return c @@ -9864,17 +10001,17 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) if rx != nil { @@ -9891,7 +10028,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { } defer res.Body.Close() if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } } ret := &Object{ @@ -10108,8 +10245,9 @@ func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { c.urlParams_.Set("projection", projection) return c @@ -10214,17 +10352,17 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Objects{ ServerResponse: googleapi.ServerResponse{ @@ -10364,9 +10502,9 @@ type ObjectsPatchCall struct { // Patch: Patches an object's metadata. 
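A sketch of Objects.Insert with a streamed upload and one of the predefined ACLs enumerated above; the reader can be any io.Reader, and the ACL value is illustrative.

func uploadObject(svc *storage.Service, bucket, name string, r io.Reader) (*storage.Object, error) {
	return svc.Objects.Insert(bucket, &storage.Object{Name: name}).
		Media(r).
		PredefinedAcl("bucketOwnerFullControl").
		Do()
}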
// -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -10423,16 +10561,26 @@ func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int // predefined set of access controls to this object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -10443,8 +10591,9 @@ func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall // properties to return. Defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { c.urlParams_.Set("projection", projection) return c @@ -10525,17 +10674,17 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -10671,18 +10820,18 @@ type ObjectsRewriteCall struct { // Rewrite: Rewrites a source object to a destination object. Optionally // overrides metadata. // -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any. -// - destinationObject: Name of the new object. Required when the object -// metadata is not otherwise provided. Overrides the object metadata's -// name value, if any. For information about how to URL encode object -// names to be path safe, see Encoding URI Path Parts. -// - sourceBucket: Name of the bucket in which to find the source -// object. 
-// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts. +// - destinationBucket: Name of the bucket in which to store the new +// object. Overrides the provided object metadata's bucket value, if +// any. +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's +// name value, if any. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts. +// - sourceBucket: Name of the bucket in which to find the source +// object. +// - sourceObject: Name of the source object. For information about how +// to URL encode object names to be path safe, see Encoding URI Path +// Parts. func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -10697,7 +10846,9 @@ func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, desti // "destinationKmsKeyName": Resource name of the Cloud KMS key, of the // form // projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object +// +// that will be used to encrypt the object. Overrides the object +// // metadata's kms_key_name value, if any. func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) @@ -10709,16 +10860,26 @@ func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) // to the destination object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) @@ -10816,8 +10977,9 @@ func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall i // specifies the acl property, when it defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. 
func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { c.urlParams_.Set("projection", projection) return c @@ -10919,17 +11081,17 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RewriteResponse{ ServerResponse: googleapi.ServerResponse{ @@ -11117,9 +11279,9 @@ type ObjectsSetIamPolicyCall struct { // SetIamPolicy: Updates an IAM policy for the specified object. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11211,17 +11373,17 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -11298,10 +11460,10 @@ type ObjectsTestIamPermissionsCall struct { // TestIamPermissions: Tests a set of permissions on the given object to // see which, if any, are held by the caller. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -// - permissions: Permissions to test. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. +// - permissions: Permissions to test. func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11401,17 +11563,17 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -11494,9 +11656,9 @@ type ObjectsUpdateCall struct { // Update: Updates an object's metadata. // -// - bucket: Name of the bucket in which the object resides. 
-// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11553,16 +11715,26 @@ func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // predefined set of access controls to this object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -11573,8 +11745,9 @@ func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCa // properties to return. Defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { c.urlParams_.Set("projection", projection) return c @@ -11655,17 +11828,17 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -11863,8 +12036,9 @@ func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. 
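Not part of the diff: the recurring change in these Do methods routes every request failure through gensupport.WrapError before it reaches the caller. As a rough sketch (the service value, bucket, and object names are hypothetical), callers can keep inspecting failures with errors.As, because the wrapped error still unwraps to *googleapi.Error:

import (
    "errors"
    "net/http"

    "google.golang.org/api/googleapi"
    storage "google.golang.org/api/storage/v1"
)

func objectExists(svc *storage.Service, bucket, object string) (bool, error) {
    // Do() returns the error produced by gensupport.WrapError in the hunks above.
    if _, err := svc.Objects.Get(bucket, object).Do(); err != nil {
        var gerr *googleapi.Error
        if errors.As(err, &gerr) && gerr.Code == http.StatusNotFound {
            return false, nil
        }
        return false, err
    }
    return true, nil
}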
func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { c.urlParams_.Set("projection", projection) return c @@ -11961,17 +12135,17 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Channel{ ServerResponse: googleapi.ServerResponse{ @@ -12170,17 +12344,17 @@ func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKey{ ServerResponse: googleapi.ServerResponse{ @@ -12319,7 +12493,7 @@ func (c *ProjectsHmacKeysDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -12373,9 +12547,9 @@ type ProjectsHmacKeysGetCall struct { // Get: Retrieves an HMAC key's metadata // -// - accessId: Name of the HMAC key. -// - projectId: Project ID owning the service account of the requested -// key. +// - accessId: Name of the HMAC key. +// - projectId: Project ID owning the service account of the requested +// key. func (r *ProjectsHmacKeysService) Get(projectId string, accessId string) *ProjectsHmacKeysGetCall { c := &ProjectsHmacKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -12466,17 +12640,17 @@ func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMeta if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKeyMetadata{ ServerResponse: googleapi.ServerResponse{ @@ -12667,17 +12841,17 @@ func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKeysMetadata{ ServerResponse: googleapi.ServerResponse{ @@ -12783,9 +12957,9 @@ type ProjectsHmacKeysUpdateCall struct { // Update: Updates the state of an HMAC key. See the HMAC Key resource // descriptor for valid states. // -// - accessId: Name of the HMAC key being updated. -// - projectId: Project ID owning the service account of the updated -// key. +// - accessId: Name of the HMAC key being updated. +// - projectId: Project ID owning the service account of the updated +// key. 
func (r *ProjectsHmacKeysService) Update(projectId string, accessId string, hmackeymetadata *HmacKeyMetadata) *ProjectsHmacKeysUpdateCall { c := &ProjectsHmacKeysUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -12869,17 +13043,17 @@ func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyM if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKeyMetadata{ ServerResponse: googleapi.ServerResponse{ @@ -13037,17 +13211,17 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceAccount{ ServerResponse: googleapi.ServerResponse{ diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/transport/cert/default_cert.go index 04aefec0afa53..21d0251531ceb 100644 --- a/vendor/google.golang.org/api/transport/cert/default_cert.go +++ b/vendor/google.golang.org/api/transport/cert/default_cert.go @@ -14,32 +14,19 @@ package cert import ( "crypto/tls" - "crypto/x509" - "encoding/json" "errors" - "fmt" - "io/ioutil" - "os" - "os/exec" - "os/user" - "path/filepath" "sync" - "time" -) - -const ( - metadataPath = ".secureConnect" - metadataFile = "context_aware_metadata.json" ) // defaultCertData holds all the variables pertaining to // the default certficate source created by DefaultSource. +// +// A singleton model is used to allow the source to be reused +// by the transport layer. type defaultCertData struct { - once sync.Once - source Source - err error - cachedCertMutex sync.Mutex - cachedCert *tls.Certificate + once sync.Once + source Source + err error } var ( @@ -49,93 +36,23 @@ var ( // Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) -// DefaultSource returns a certificate source that execs the command specified -// in the file at ~/.secureConnect/context_aware_metadata.json +// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable. +var errSourceUnavailable = errors.New("certificate source is unavailable") + +// DefaultSource returns a certificate source using the preferred EnterpriseCertificateProxySource. +// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource. // -// If that file does not exist, a nil source is returned. +// If neither source is available (due to missing configurations), a nil Source and a nil Error are +// returned to indicate that a default certificate source is unavailable. 
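A minimal sketch, not part of the diff, of how a caller might consume the fallback source described above; the http.Transport plumbing is illustrative rather than taken from this change:

import (
    "crypto/tls"
    "net/http"

    "google.golang.org/api/transport/cert"
)

func newClientCertTransport() (*http.Transport, error) {
    src, err := cert.DefaultSource()
    if err != nil {
        return nil, err
    }
    cfg := &tls.Config{}
    if src != nil {
        // A cert.Source has the same signature as GetClientCertificate.
        // src is nil when neither the enterprise certificate proxy nor
        // Secure Connect is configured on the machine.
        cfg.GetClientCertificate = src
    }
    return &http.Transport{TLSClientConfig: cfg}, nil
}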
func DefaultSource() (Source, error) { defaultCert.once.Do(func() { - defaultCert.source, defaultCert.err = newSecureConnectSource() + defaultCert.source, defaultCert.err = NewEnterpriseCertificateProxySource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = NewSecureConnectSource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = nil, nil + } + } }) return defaultCert.source, defaultCert.err } - -type secureConnectSource struct { - metadata secureConnectMetadata -} - -type secureConnectMetadata struct { - Cmd []string `json:"cert_provider_command"` -} - -// newSecureConnectSource creates a secureConnectSource by reading the well-known file. -func newSecureConnectSource() (Source, error) { - user, err := user.Current() - if err != nil { - // Ignore. - return nil, nil - } - filename := filepath.Join(user.HomeDir, metadataPath, metadataFile) - file, err := ioutil.ReadFile(filename) - if os.IsNotExist(err) { - // Ignore. - return nil, nil - } - if err != nil { - return nil, err - } - - var metadata secureConnectMetadata - if err := json.Unmarshal(file, &metadata); err != nil { - return nil, fmt.Errorf("cert: could not parse JSON in %q: %v", filename, err) - } - if err := validateMetadata(metadata); err != nil { - return nil, fmt.Errorf("cert: invalid config in %q: %v", filename, err) - } - return (&secureConnectSource{ - metadata: metadata, - }).getClientCertificate, nil -} - -func validateMetadata(metadata secureConnectMetadata) error { - if len(metadata.Cmd) == 0 { - return errors.New("empty cert_provider_command") - } - return nil -} - -func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { - defaultCert.cachedCertMutex.Lock() - defer defaultCert.cachedCertMutex.Unlock() - if defaultCert.cachedCert != nil && !isCertificateExpired(defaultCert.cachedCert) { - return defaultCert.cachedCert, nil - } - // Expand OS environment variables in the cert provider command such as "$HOME". - for i := 0; i < len(s.metadata.Cmd); i++ { - s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) - } - command := s.metadata.Cmd - data, err := exec.Command(command[0], command[1:]...).Output() - if err != nil { - // TODO(cbro): read stderr for error message? Might contain sensitive info. - return nil, err - } - cert, err := tls.X509KeyPair(data, data) - if err != nil { - return nil, err - } - defaultCert.cachedCert = &cert - return &cert, nil -} - -// isCertificateExpired returns true if the given cert is expired or invalid. -func isCertificateExpired(cert *tls.Certificate) bool { - if len(cert.Certificate) == 0 { - return true - } - parsed, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - return true - } - return time.Now().After(parsed.NotAfter) -} diff --git a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go b/vendor/google.golang.org/api/transport/cert/enterprise_cert.go new file mode 100644 index 0000000000000..eaa52e07c086b --- /dev/null +++ b/vendor/google.golang.org/api/transport/cert/enterprise_cert.go @@ -0,0 +1,56 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. 
+// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "errors" + "os" + + "github.com/googleapis/enterprise-certificate-proxy/client" +) + +type ecpSource struct { + key *client.Key +} + +// NewEnterpriseCertificateProxySource creates a certificate source +// using the Enterprise Certificate Proxy client, which delegates +// certifcate related operations to an OS-specific "signer binary" +// that communicates with the native keystore (ex. keychain on MacOS). +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate issuer and the location of the signer binary. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. +func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) { + key, err := client.Cred(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Enterprise Certificate Proxy is not supported. + return nil, errSourceUnavailable + } + return nil, err + } + + return (&ecpSource{ + key: key, + }).getClientCertificate, nil +} + +func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var cert tls.Certificate + cert.PrivateKey = s.key + cert.Certificate = s.key.CertificateChain() + return &cert, nil +} diff --git a/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go b/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go new file mode 100644 index 0000000000000..5913cab801722 --- /dev/null +++ b/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go @@ -0,0 +1,123 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" + "time" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +type secureConnectSource struct { + metadata secureConnectMetadata + + // Cache the cert to avoid executing helper command repeatedly. + cachedCertMutex sync.Mutex + cachedCert *tls.Certificate +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// NewSecureConnectSource creates a certificate source using +// the Secure Connect Helper and its associated metadata file. +// +// The configFilePath points to the location of the context aware metadata file. +// If configFilePath is empty, use the default context aware metadata location. +func NewSecureConnectSource(configFilePath string) (Source, error) { + if configFilePath == "" { + user, err := user.Current() + if err != nil { + // Error locating the default config means Secure Connect is not supported. 
+ return nil, errSourceUnavailable + } + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) + } + + file, err := ioutil.ReadFile(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Secure Connect is not supported. + return nil, errSourceUnavailable + } + return nil, err + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + s.cachedCertMutex.Lock() + defer s.cachedCertMutex.Unlock() + if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) { + return s.cachedCert, nil + } + // Expand OS environment variables in the cert provider command such as "$HOME". + for i := 0; i < len(s.metadata.Cmd); i++ { + s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) + } + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + s.cachedCert = &cert + return &cert, nil +} + +// isCertificateExpired returns true if the given cert is expired or invalid. +func isCertificateExpired(cert *tls.Certificate) bool { + if len(cert.Certificate) == 0 { + return true + } + parsed, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return true + } + return time.Now().After(parsed.NotAfter) +} diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go new file mode 100644 index 0000000000000..652b8eba51d33 --- /dev/null +++ b/vendor/google.golang.org/api/transport/dial.go @@ -0,0 +1,48 @@ +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package transport + +import ( + "context" + "net/http" + + "golang.org/x/oauth2/google" + "google.golang.org/grpc" + + "google.golang.org/api/internal" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + htransport "google.golang.org/api/transport/http" +) + +// NewHTTPClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. +func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + return htransport.NewClient(ctx, opts...) +} + +// DialGRPC returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func DialGRPC(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.Dial(ctx, opts...) +} + +// DialGRPCInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. 
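Not part of the diff: a brief sketch of calling the HTTP and gRPC dialers declared in this new transport/dial.go; the endpoint and scope strings below are placeholders:

import (
    "context"

    "google.golang.org/api/option"
    "google.golang.org/api/transport"
)

func dialBoth(ctx context.Context) error {
    // HTTP client plus the endpoint resolved from the options.
    httpClient, endpoint, err := transport.NewHTTPClient(ctx,
        option.WithScopes("https://www.googleapis.com/auth/cloud-platform"))
    if err != nil {
        return err
    }
    _, _ = httpClient, endpoint

    // gRPC connection configured from the same style of options.
    conn, err := transport.DialGRPC(ctx,
        option.WithEndpoint("storage.googleapis.com:443"),
        option.WithScopes("https://www.googleapis.com/auth/cloud-platform"))
    if err != nil {
        return err
    }
    return conn.Close()
}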
+func DialGRPCInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.DialInsecure(ctx, opts...) +} + +// Creds constructs a google.Credentials from the information in the options, +// or obtains the default credentials in the same way as google.FindDefaultCredentials. +func Creds(ctx context.Context, opts ...option.ClientOption) (*google.Credentials, error) { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internal.Creds(ctx, &ds) +} diff --git a/vendor/google.golang.org/api/transport/doc.go b/vendor/google.golang.org/api/transport/doc.go new file mode 100644 index 0000000000000..7143abee4581b --- /dev/null +++ b/vendor/google.golang.org/api/transport/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport provides utility methods for creating authenticated +// transports to Google's HTTP and gRPC APIs. It is intended to be used in +// conjunction with google.golang.org/api/option. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package transport diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index c86f56507f514..efcc8e6c641f9 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" grpcgoogle "google.golang.org/grpc/credentials/google" + grpcinsecure "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/oauth" // Install grpclb, which is required for direct path. @@ -126,10 +127,26 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if err != nil { return nil, err } - var grpcOpts []grpc.DialOption + + var transportCreds credentials.TransportCredentials if insecure { - grpcOpts = []grpc.DialOption{grpc.WithInsecure()} - } else if !o.NoAuth { + transportCreds = grpcinsecure.NewCredentials() + } else { + transportCreds = credentials.NewTLS(&tls.Config{ + GetClientCertificate: clientCertSource, + }) + } + + // Initialize gRPC dial options with transport-level security options. + grpcOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(transportCreds), + } + + // Authentication can only be sent when communicating over a secure connection. + // + // TODO: Should we be more lenient in the future and allow sending credentials + // when dialing an insecure connection? + if !o.NoAuth && !insecure { if o.APIKey != "" { log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") } @@ -142,8 +159,17 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C o.QuotaProject = internal.QuotaProjectFromCreds(creds) } + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(grpcTokenSource{ + TokenSource: oauth.TokenSource{creds.TokenSource}, + quotaProject: o.QuotaProject, + requestReason: o.RequestReason, + }), + ) + // Attempt Direct Path: if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() { + // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. 
grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{oauth.TokenSource{creds.TokenSource}}))} if timeoutDialerOption != nil { @@ -153,9 +179,9 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") { // google-c2p resolver target must not have a port number if addr, _, err := net.SplitHostPort(endpoint); err == nil { - endpoint = "google-c2p-experimental:///" + addr + endpoint = "google-c2p:///" + addr } else { - endpoint = "google-c2p-experimental:///" + endpoint + endpoint = "google-c2p:///" + endpoint } } else { if !strings.HasPrefix(endpoint, "dns:///") { @@ -169,18 +195,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)) } // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. - } else { - tlsConfig := &tls.Config{ - GetClientCertificate: clientCertSource, - } - grpcOpts = []grpc.DialOption{ - grpc.WithPerRPCCredentials(grpcTokenSource{ - TokenSource: oauth.TokenSource{creds.TokenSource}, - quotaProject: o.QuotaProject, - requestReason: o.RequestReason, - }), - grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), - } } } diff --git a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go index 4bf9e82172398..507cd3ec63ab5 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go +++ b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go @@ -12,7 +12,6 @@ import ( "net" "syscall" - "golang.org/x/sys/unix" "google.golang.org/grpc" ) @@ -20,6 +19,9 @@ const ( // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By // default is 20 seconds. tcpUserTimeoutMilliseconds = 20000 + + // Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT. + tcpUserTimeoutOp = 0x12 ) func init() { @@ -33,7 +35,7 @@ func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { var syscallErr error controlErr := c.Control(func(fd uintptr) { syscallErr = syscall.SetsockoptInt( - int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, tcpUserTimeoutMilliseconds) + int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds) }) if syscallErr != nil { return syscallErr diff --git a/vendor/google.golang.org/api/transport/internal/dca/dca.go b/vendor/google.golang.org/api/transport/internal/dca/dca.go index 071586e94463e..78004f0475f8c 100644 --- a/vendor/google.golang.org/api/transport/internal/dca/dca.go +++ b/vendor/google.golang.org/api/transport/internal/dca/dca.go @@ -6,17 +6,17 @@ // Authentication according to https://google.aip.dev/auth/4114 // // The overall logic for DCA is as follows: -// 1. If both endpoint override and client certificate are specified, use them as is. -// 2. If user does not specify client certificate, we will attempt to use default -// client certificate. -// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if -// client certificate is available and defaultEndpoint otherwise. +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If user does not specify client certificate, we will attempt to use default +// client certificate. +// 3. 
If user does not specify endpoint override, we will use defaultMtlsEndpoint if +// client certificate is available and defaultEndpoint otherwise. // // Implications of the above logic: -// 1. If the user specifies a non-mTLS endpoint override but client certificate is -// available, we will pass along the cert anyway and let the server decide what to do. -// 2. If the user specifies an mTLS endpoint override but client certificate is not -// available, we will not fail-fast, but let backend throw error when connecting. +// 1. If the user specifies a non-mTLS endpoint override but client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. If the user specifies an mTLS endpoint override but client certificate is not +// available, we will not fail-fast, but let backend throw error when connecting. // // We would like to avoid introducing client-side logic that parses whether the // endpoint override is an mTLS url, since the url pattern may change at anytime. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 66fdb650f4baa..4c91534d5a9db 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -15,17 +15,20 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/api/client.proto package annotations import ( reflect "reflect" + sync "sync" + api "google.golang.org/genproto/googleapis/api" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" descriptorpb "google.golang.org/protobuf/types/descriptorpb" + durationpb "google.golang.org/protobuf/types/known/durationpb" ) const ( @@ -35,6 +38,1050 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// The organization for which the client libraries are being published. +// Affects the url where generated docs are published, etc. +type ClientLibraryOrganization int32 + +const ( + // Not useful. + ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED ClientLibraryOrganization = 0 + // Google Cloud Platform Org. + ClientLibraryOrganization_CLOUD ClientLibraryOrganization = 1 + // Ads (Advertising) Org. + ClientLibraryOrganization_ADS ClientLibraryOrganization = 2 + // Photos Org. + ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3 + // Street View Org. + ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4 +) + +// Enum value maps for ClientLibraryOrganization. 
+var ( + ClientLibraryOrganization_name = map[int32]string{ + 0: "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", + 1: "CLOUD", + 2: "ADS", + 3: "PHOTOS", + 4: "STREET_VIEW", + } + ClientLibraryOrganization_value = map[string]int32{ + "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0, + "CLOUD": 1, + "ADS": 2, + "PHOTOS": 3, + "STREET_VIEW": 4, + } +) + +func (x ClientLibraryOrganization) Enum() *ClientLibraryOrganization { + p := new(ClientLibraryOrganization) + *p = x + return p +} + +func (x ClientLibraryOrganization) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryOrganization) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[0].Descriptor() +} + +func (ClientLibraryOrganization) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[0] +} + +func (x ClientLibraryOrganization) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryOrganization.Descriptor instead. +func (ClientLibraryOrganization) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// To where should client libraries be published? +type ClientLibraryDestination int32 + +const ( + // Client libraries will neither be generated nor published to package + // managers. + ClientLibraryDestination_CLIENT_LIBRARY_DESTINATION_UNSPECIFIED ClientLibraryDestination = 0 + // Generate the client library in a repo under github.com/googleapis, + // but don't publish it to package managers. + ClientLibraryDestination_GITHUB ClientLibraryDestination = 10 + // Publish the library to package managers like nuget.org and npmjs.com. + ClientLibraryDestination_PACKAGE_MANAGER ClientLibraryDestination = 20 +) + +// Enum value maps for ClientLibraryDestination. +var ( + ClientLibraryDestination_name = map[int32]string{ + 0: "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", + 10: "GITHUB", + 20: "PACKAGE_MANAGER", + } + ClientLibraryDestination_value = map[string]int32{ + "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED": 0, + "GITHUB": 10, + "PACKAGE_MANAGER": 20, + } +) + +func (x ClientLibraryDestination) Enum() *ClientLibraryDestination { + p := new(ClientLibraryDestination) + *p = x + return p +} + +func (x ClientLibraryDestination) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryDestination) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[1].Descriptor() +} + +func (ClientLibraryDestination) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[1] +} + +func (x ClientLibraryDestination) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryDestination.Descriptor instead. +func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +// Required information for every language. +type CommonLanguageSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Link to automatically generated reference documentation. Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + // + // Deprecated: Do not use. 
+ ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` + // The destination where API teams want this client library to be published. + Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` +} + +func (x *CommonLanguageSettings) Reset() { + *x = CommonLanguageSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonLanguageSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonLanguageSettings) ProtoMessage() {} + +func (x *CommonLanguageSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonLanguageSettings.ProtoReflect.Descriptor instead. +func (*CommonLanguageSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: Do not use. +func (x *CommonLanguageSettings) GetReferenceDocsUri() string { + if x != nil { + return x.ReferenceDocsUri + } + return "" +} + +func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { + if x != nil { + return x.Destinations + } + return nil +} + +// Details about how and where to publish client libraries. +type ClientLibrarySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Version of the API to apply these settings to. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Launch stage of this version of the API. + LaunchStage api.LaunchStage `protobuf:"varint,2,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // When using transport=rest, the client request will encode enums as + // numbers rather than strings. + RestNumericEnums bool `protobuf:"varint,3,opt,name=rest_numeric_enums,json=restNumericEnums,proto3" json:"rest_numeric_enums,omitempty"` + // Settings for legacy Java features, supported in the Service YAML. + JavaSettings *JavaSettings `protobuf:"bytes,21,opt,name=java_settings,json=javaSettings,proto3" json:"java_settings,omitempty"` + // Settings for C++ client libraries. + CppSettings *CppSettings `protobuf:"bytes,22,opt,name=cpp_settings,json=cppSettings,proto3" json:"cpp_settings,omitempty"` + // Settings for PHP client libraries. + PhpSettings *PhpSettings `protobuf:"bytes,23,opt,name=php_settings,json=phpSettings,proto3" json:"php_settings,omitempty"` + // Settings for Python client libraries. + PythonSettings *PythonSettings `protobuf:"bytes,24,opt,name=python_settings,json=pythonSettings,proto3" json:"python_settings,omitempty"` + // Settings for Node client libraries. + NodeSettings *NodeSettings `protobuf:"bytes,25,opt,name=node_settings,json=nodeSettings,proto3" json:"node_settings,omitempty"` + // Settings for .NET client libraries. + DotnetSettings *DotnetSettings `protobuf:"bytes,26,opt,name=dotnet_settings,json=dotnetSettings,proto3" json:"dotnet_settings,omitempty"` + // Settings for Ruby client libraries. 
+ RubySettings *RubySettings `protobuf:"bytes,27,opt,name=ruby_settings,json=rubySettings,proto3" json:"ruby_settings,omitempty"` + // Settings for Go client libraries. + GoSettings *GoSettings `protobuf:"bytes,28,opt,name=go_settings,json=goSettings,proto3" json:"go_settings,omitempty"` +} + +func (x *ClientLibrarySettings) Reset() { + *x = ClientLibrarySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientLibrarySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientLibrarySettings) ProtoMessage() {} + +func (x *ClientLibrarySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientLibrarySettings.ProtoReflect.Descriptor instead. +func (*ClientLibrarySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientLibrarySettings) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ClientLibrarySettings) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *ClientLibrarySettings) GetRestNumericEnums() bool { + if x != nil { + return x.RestNumericEnums + } + return false +} + +func (x *ClientLibrarySettings) GetJavaSettings() *JavaSettings { + if x != nil { + return x.JavaSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetCppSettings() *CppSettings { + if x != nil { + return x.CppSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPhpSettings() *PhpSettings { + if x != nil { + return x.PhpSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPythonSettings() *PythonSettings { + if x != nil { + return x.PythonSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetNodeSettings() *NodeSettings { + if x != nil { + return x.NodeSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetDotnetSettings() *DotnetSettings { + if x != nil { + return x.DotnetSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetRubySettings() *RubySettings { + if x != nil { + return x.RubySettings + } + return nil +} + +func (x *ClientLibrarySettings) GetGoSettings() *GoSettings { + if x != nil { + return x.GoSettings + } + return nil +} + +// This message configures the settings for publishing [Google Cloud Client +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) +// generated from the service config. +type Publishing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of API method settings, e.g. the behavior for methods that use the + // long-running operation pattern. + MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"` + // Link to a place that API users can report issues. Example: + // https://issuetracker.google.com/issues/new?component=190865&template=1161103 + NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"` + // Link to product home page. 
Example: + // https://cloud.google.com/asset-inventory/docs/overview + DocumentationUri string `protobuf:"bytes,102,opt,name=documentation_uri,json=documentationUri,proto3" json:"documentation_uri,omitempty"` + // Used as a tracking tag when collecting data about the APIs developer + // relations artifacts like docs, packages delivered to package managers, + // etc. Example: "speech". + ApiShortName string `protobuf:"bytes,103,opt,name=api_short_name,json=apiShortName,proto3" json:"api_short_name,omitempty"` + // GitHub label to apply to issues and pull requests opened for this API. + GithubLabel string `protobuf:"bytes,104,opt,name=github_label,json=githubLabel,proto3" json:"github_label,omitempty"` + // GitHub teams to be added to CODEOWNERS in the directory in GitHub + // containing source code for the client libraries for this API. + CodeownerGithubTeams []string `protobuf:"bytes,105,rep,name=codeowner_github_teams,json=codeownerGithubTeams,proto3" json:"codeowner_github_teams,omitempty"` + // A prefix used in sample code when demarking regions to be included in + // documentation. + DocTagPrefix string `protobuf:"bytes,106,opt,name=doc_tag_prefix,json=docTagPrefix,proto3" json:"doc_tag_prefix,omitempty"` + // For whom the client library is being published. + Organization ClientLibraryOrganization `protobuf:"varint,107,opt,name=organization,proto3,enum=google.api.ClientLibraryOrganization" json:"organization,omitempty"` + // Client library settings. If the same version string appears multiple + // times in this list, then the last one wins. Settings from earlier + // settings with the same version string are discarded. + LibrarySettings []*ClientLibrarySettings `protobuf:"bytes,109,rep,name=library_settings,json=librarySettings,proto3" json:"library_settings,omitempty"` +} + +func (x *Publishing) Reset() { + *x = Publishing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Publishing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Publishing) ProtoMessage() {} + +func (x *Publishing) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Publishing.ProtoReflect.Descriptor instead. 
+func (*Publishing) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{2} +} + +func (x *Publishing) GetMethodSettings() []*MethodSettings { + if x != nil { + return x.MethodSettings + } + return nil +} + +func (x *Publishing) GetNewIssueUri() string { + if x != nil { + return x.NewIssueUri + } + return "" +} + +func (x *Publishing) GetDocumentationUri() string { + if x != nil { + return x.DocumentationUri + } + return "" +} + +func (x *Publishing) GetApiShortName() string { + if x != nil { + return x.ApiShortName + } + return "" +} + +func (x *Publishing) GetGithubLabel() string { + if x != nil { + return x.GithubLabel + } + return "" +} + +func (x *Publishing) GetCodeownerGithubTeams() []string { + if x != nil { + return x.CodeownerGithubTeams + } + return nil +} + +func (x *Publishing) GetDocTagPrefix() string { + if x != nil { + return x.DocTagPrefix + } + return "" +} + +func (x *Publishing) GetOrganization() ClientLibraryOrganization { + if x != nil { + return x.Organization + } + return ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED +} + +func (x *Publishing) GetLibrarySettings() []*ClientLibrarySettings { + if x != nil { + return x.LibrarySettings + } + return nil +} + +// Settings for Java client libraries. +type JavaSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The package name to use in Java. Clobbers the java_package option + // set in the protobuf. This should be used **only** by APIs + // who have already set the language_settings.java.package_name" field + // in gapic.yaml. API teams should use the protobuf java_package option + // where possible. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // library_package: com.google.cloud.pubsub.v1 + LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"` + // Configure the Java class name to use instead of the service's for its + // corresponding generated GAPIC client. Keys are fully-qualified + // service names as they appear in the protobuf (including the full + // the language_settings.java.interface_names" field in gapic.yaml. API + // teams should otherwise use the service name as it appears in the + // protobuf. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // service_class_names: + // - google.pubsub.v1.Publisher: TopicAdmin + // - google.pubsub.v1.Subscriber: SubscriptionAdmin + ServiceClassNames map[string]string `protobuf:"bytes,2,rep,name=service_class_names,json=serviceClassNames,proto3" json:"service_class_names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,3,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *JavaSettings) Reset() { + *x = JavaSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JavaSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JavaSettings) ProtoMessage() {} + +func (x *JavaSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JavaSettings.ProtoReflect.Descriptor instead. +func (*JavaSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{3} +} + +func (x *JavaSettings) GetLibraryPackage() string { + if x != nil { + return x.LibraryPackage + } + return "" +} + +func (x *JavaSettings) GetServiceClassNames() map[string]string { + if x != nil { + return x.ServiceClassNames + } + return nil +} + +func (x *JavaSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for C++ client libraries. +type CppSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *CppSettings) Reset() { + *x = CppSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CppSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CppSettings) ProtoMessage() {} + +func (x *CppSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CppSettings.ProtoReflect.Descriptor instead. +func (*CppSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{4} +} + +func (x *CppSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Php client libraries. +type PhpSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PhpSettings) Reset() { + *x = PhpSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PhpSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhpSettings) ProtoMessage() {} + +func (x *PhpSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhpSettings.ProtoReflect.Descriptor instead. +func (*PhpSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{5} +} + +func (x *PhpSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Python client libraries. +type PythonSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PythonSettings) Reset() { + *x = PythonSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings) ProtoMessage() {} + +func (x *PythonSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings.ProtoReflect.Descriptor instead. +func (*PythonSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6} +} + +func (x *PythonSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Node client libraries. +type NodeSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *NodeSettings) Reset() { + *x = NodeSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeSettings) ProtoMessage() {} + +func (x *NodeSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeSettings.ProtoReflect.Descriptor instead. 
+func (*NodeSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{7} +} + +func (x *NodeSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Dotnet client libraries. +type DotnetSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *DotnetSettings) Reset() { + *x = DotnetSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DotnetSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DotnetSettings) ProtoMessage() {} + +func (x *DotnetSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DotnetSettings.ProtoReflect.Descriptor instead. +func (*DotnetSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{8} +} + +func (x *DotnetSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Ruby client libraries. +type RubySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *RubySettings) Reset() { + *x = RubySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RubySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RubySettings) ProtoMessage() {} + +func (x *RubySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RubySettings.ProtoReflect.Descriptor instead. +func (*RubySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{9} +} + +func (x *RubySettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Go client libraries. +type GoSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *GoSettings) Reset() { + *x = GoSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoSettings) ProtoMessage() {} + +func (x *GoSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoSettings.ProtoReflect.Descriptor instead. +func (*GoSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{10} +} + +func (x *GoSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Describes the generator configuration for a method. +type MethodSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of the method, for which the options below apply. + // This is used to find the method to apply the options. + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Describes settings to use for long-running operations when generating + // API methods for RPCs. Complements RPCs that use the annotations in + // google/longrunning/operations.proto. + // + // Example of a YAML configuration:: + // + // publishing: + // method_behavior: + // - selector: CreateAdDomain + // long_running: + // initial_poll_delay: + // seconds: 60 # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: + // seconds: 360 # 6 minutes + // total_poll_timeout: + // seconds: 54000 # 90 minutes + LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` +} + +func (x *MethodSettings) Reset() { + *x = MethodSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings) ProtoMessage() {} + +func (x *MethodSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings.ProtoReflect.Descriptor instead. +func (*MethodSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11} +} + +func (x *MethodSettings) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning { + if x != nil { + return x.LongRunning + } + return nil +} + +// Describes settings to use when generating API methods that use the +// long-running operation pattern. +// All default values below are from those used in the client library +// generators (e.g. 
+// [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). +type MethodSettings_LongRunning struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Initial delay after which the first poll request will be made. + // Default value: 5 seconds. + InitialPollDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=initial_poll_delay,json=initialPollDelay,proto3" json:"initial_poll_delay,omitempty"` + // Multiplier to gradually increase delay between subsequent polls until it + // reaches max_poll_delay. + // Default value: 1.5. + PollDelayMultiplier float32 `protobuf:"fixed32,2,opt,name=poll_delay_multiplier,json=pollDelayMultiplier,proto3" json:"poll_delay_multiplier,omitempty"` + // Maximum time between two subsequent poll requests. + // Default value: 45 seconds. + MaxPollDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=max_poll_delay,json=maxPollDelay,proto3" json:"max_poll_delay,omitempty"` + // Total polling timeout. + // Default value: 5 minutes. + TotalPollTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=total_poll_timeout,json=totalPollTimeout,proto3" json:"total_poll_timeout,omitempty"` +} + +func (x *MethodSettings_LongRunning) Reset() { + *x = MethodSettings_LongRunning{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings_LongRunning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings_LongRunning) ProtoMessage() {} + +func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings_LongRunning.ProtoReflect.Descriptor instead. 
+func (*MethodSettings_LongRunning) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *MethodSettings_LongRunning) GetInitialPollDelay() *durationpb.Duration { + if x != nil { + return x.InitialPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetPollDelayMultiplier() float32 { + if x != nil { + return x.PollDelayMultiplier + } + return 0 +} + +func (x *MethodSettings_LongRunning) GetMaxPollDelay() *durationpb.Duration { + if x != nil { + return x.MaxPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetTotalPollTimeout() *durationpb.Duration { + if x != nil { + return x.TotalPollTimeout + } + return nil +} + var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.MethodOptions)(nil), @@ -78,26 +1125,26 @@ var ( // // For example, the proto RPC and annotation: // - // rpc CreateSubscription(CreateSubscriptionRequest) - // returns (Subscription) { - // option (google.api.method_signature) = "name,topic"; - // } + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } // // Would add the following Java overload (in addition to the method accepting // the request object): // - // public final Subscription createSubscription(String name, String topic) + // public final Subscription createSubscription(String name, String topic) // // The following backwards-compatibility guidelines apply: // - // * Adding this annotation to an unannotated method is backwards + // - Adding this annotation to an unannotated method is backwards // compatible. - // * Adding this annotation to a method which already has existing + // - Adding this annotation to a method which already has existing // method signature annotations is backwards compatible if and only if // the new method signature annotation is last in the sequence. - // * Modifying or removing an existing method signature annotation is + // - Modifying or removing an existing method signature annotation is // a breaking change. - // * Re-ordering existing method signature annotations is a breaking + // - Re-ordering existing method signature annotations is a breaking // change. // // repeated string method_signature = 1051; @@ -111,10 +1158,10 @@ var ( // // Example: // - // service Foo { - // option (google.api.default_host) = "foo.googleapi.com"; - // ... - // } + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } // // optional string default_host = 1049; E_DefaultHost = &file_google_api_client_proto_extTypes[1] @@ -122,22 +1169,22 @@ var ( // // Example: // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform"; - // ... - // } + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } // // If there is more than one scope, use a comma-separated string: // // Example: // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform," - // "https://www.googleapis.com/auth/monitoring"; - // ... - // } + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... 
+ // } // // optional string oauth_scopes = 1050; E_OauthScopes = &file_google_api_client_proto_extTypes[2] @@ -148,44 +1195,278 @@ var File_google_api_client_proto protoreflect.FileDescriptor var file_google_api_client_proto_rawDesc = []byte{ 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, - 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, - 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, + 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x73, + 0x55, 0x72, 0x69, 0x12, 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, + 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, + 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 
0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, + 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, + 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, + 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, + 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, + 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, + 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, + 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x22, 0xe0, 0x03, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, + 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, + 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, + 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, + 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, + 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, + 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, + 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, + 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, + 0x12, 0x5f, 0x0a, 0x13, 0x73, 
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, + 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, + 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, + 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 
0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, + 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, + 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, + 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, + 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, + 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, + 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, + 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, + 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0x79, 0x0a, 0x19, 0x43, 
0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, + 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, + 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, + 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, + 0x57, 0x10, 0x04, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, + 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, + 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, + 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, + 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, - 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, - 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, + 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, + 0x65, 
0x73, 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_client_proto_rawDescOnce sync.Once + file_google_api_client_proto_rawDescData = file_google_api_client_proto_rawDesc +) + +func file_google_api_client_proto_rawDescGZIP() []byte { + file_google_api_client_proto_rawDescOnce.Do(func() { + file_google_api_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_client_proto_rawDescData) + }) + return file_google_api_client_proto_rawDescData } +var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 14) var file_google_api_client_proto_goTypes = []interface{}{ - (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*MethodSettings_LongRunning)(nil), // 15: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 16: google.api.LaunchStage + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 18: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 19: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ - 0, // 0: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 1, // 1: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 1, // 2: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 0, // [0:3] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination + 16, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 2: 
google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 21: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 15, // 22: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 17, // 23: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 17, // 24: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 17, // 25: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 18, // 26: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 19, // 27: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 19, // 28: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 29, // [29:29] is the sub-list for method output_type + 29, // [29:29] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 26, // [26:29] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -193,18 +1474,178 @@ func file_google_api_client_proto_init() { if File_google_api_client_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_google_api_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonLanguageSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientLibrarySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + 
} + file_google_api_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Publishing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JavaSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CppSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PhpSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DotnetSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RubySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings_LongRunning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, + NumEnums: 2, + NumMessages: 14, NumExtensions: 3, NumServices: 0, }, GoTypes: file_google_api_client_proto_goTypes, DependencyIndexes: file_google_api_client_proto_depIdxs, + EnumInfos: file_google_api_client_proto_enumTypes, + MessageInfos: file_google_api_client_proto_msgTypes, ExtensionInfos: file_google_api_client_proto_extTypes, }.Build() File_google_api_client_proto = out.File diff --git 
a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 4f34ab73cba11..6f11b7c500f8d 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -127,19 +127,19 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // Example: // -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } // // This enables an HTTP REST to gRPC mapping as below: // @@ -151,21 +151,21 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // automatically become HTTP query parameters if there is no HTTP request body. // For example: // -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -// } +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } // // This enables a HTTP JSON to RPC mapping as below: // @@ -186,18 +186,18 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // specifies the mapping. Consider a REST update method on the // message resource collection: // -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } // // The following HTTP JSON to RPC mapping is enabled, where the // representation of the JSON in the request body is determined by @@ -213,19 +213,18 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // request body. 
This enables the following alternative definition of // the update method: // -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } // // The following HTTP JSON to RPC mapping is enabled: // @@ -243,20 +242,20 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // It is possible to define multiple HTTP methods for one RPC by using // the `additional_bindings` option. Example: // -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } // // This enables the following two alternative HTTP JSON to RPC mappings: // @@ -268,15 +267,15 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // ## Rules for HTTP mapping // -// 1. Leaf request fields (recursive expansion nested messages in the request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP -// request body. -// - All other fields are passed via the URL query parameters, and the -// parameter name is the field path in the request message. A repeated -// field can be represented as multiple query parameters under the same -// name. +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. // 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields // are passed via URL path and HTTP request body. // 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all @@ -284,12 +283,12 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // ### Path template syntax // -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." 
IDENT } ; -// Verb = ":" LITERAL ; +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; // // The syntax `*` matches a single URL path segment. The syntax `**` matches // zero or more URL path segments, which must be the last part of the URL path @@ -338,11 +337,11 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // Example: // -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} // // ## Special notes // diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 6515668d34f26..13ea54b2940f3 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -157,45 +157,45 @@ func (ResourceDescriptor_Style) EnumDescriptor() ([]byte, []int) { // // Example: // -// message Topic { -// // Indicates this message defines a resource schema. -// // Declares the resource type in the format of {service}/{kind}. -// // For Kubernetes resources, the format is {api group}/{kind}. -// option (google.api.resource) = { -// type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" -// }; -// } +// message Topic { +// // Indicates this message defines a resource schema. +// // Declares the resource type in the format of {service}/{kind}. +// // For Kubernetes resources, the format is {api group}/{kind}. +// option (google.api.resource) = { +// type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// }; +// } // // The ResourceDescriptor Yaml config will look like: // -// resources: -// - type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" +// resources: +// - type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" // // Sometimes, resources have multiple patterns, typically because they can // live under multiple parents. 
// // Example: // -// message LogEntry { -// option (google.api.resource) = { -// type: "logging.googleapis.com/LogEntry" -// pattern: "projects/{project}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" -// }; -// } +// message LogEntry { +// option (google.api.resource) = { +// type: "logging.googleapis.com/LogEntry" +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +// }; +// } // // The ResourceDescriptor Yaml config will look like: // -// resources: -// - type: 'logging.googleapis.com/LogEntry' -// pattern: "projects/{project}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" +// resources: +// - type: 'logging.googleapis.com/LogEntry' +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" type ResourceDescriptor struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go index dd45cf6e6c1a3..6707a7b1c1d27 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -44,71 +44,71 @@ const ( // // Message Definition: // -// message Request { -// // The name of the Table -// // Values can be of the following formats: -// // - `projects//tables/` -// // - `projects//instances//tables/
<table>` -// // - `region/<region>/zones/<zone>/tables/<table>` -// string table_name = 1; -// -// // This value specifies routing for replication. -// // It can be in the following formats: -// // - `profiles/<profile_id>` -// // - a legacy `profile_id` that can be any string -// string app_profile_id = 2; -// } +// message Request { +// // The name of the Table +// // Values can be of the following formats: +// // - `projects/<project>/tables/<table>` +// // - `projects/<project>/instances/<instance>/tables/<table>` +// // - `region/<region>/zones/<zone>/tables/<table>
` +// string table_name = 1; +// +// // This value specifies routing for replication. +// // It can be in the following formats: +// // - `profiles/` +// // - a legacy `profile_id` that can be any string +// string app_profile_id = 2; +// } // // Example message: // -// { -// table_name: projects/proj_foo/instances/instance_bar/table/table_baz, -// app_profile_id: profiles/prof_qux -// } +// { +// table_name: projects/proj_foo/instances/instance_bar/table/table_baz, +// app_profile_id: profiles/prof_qux +// } // // The routing header consists of one or multiple key-value pairs. Every key // and value must be percent-encoded, and joined together in the format of // `key1=value1&key2=value2`. // In the examples below I am skipping the percent-encoding for readablity. // -// Example 1 +// # Example 1 // // Extracting a field from the request to put into the routing header // unchanged, with the key equal to the field name. // // annotation: // -// option (google.api.routing) = { -// // Take the `app_profile_id`. -// routing_parameters { -// field: "app_profile_id" -// } -// }; +// option (google.api.routing) = { +// // Take the `app_profile_id`. +// routing_parameters { +// field: "app_profile_id" +// } +// }; // // result: // -// x-goog-request-params: app_profile_id=profiles/prof_qux +// x-goog-request-params: app_profile_id=profiles/prof_qux // -// Example 2 +// # Example 2 // // Extracting a field from the request to put into the routing header // unchanged, with the key different from the field name. // // annotation: // -// option (google.api.routing) = { -// // Take the `app_profile_id`, but name it `routing_id` in the header. -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// }; +// option (google.api.routing) = { +// // Take the `app_profile_id`, but name it `routing_id` in the header. +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; // // result: // -// x-goog-request-params: routing_id=profiles/prof_qux +// x-goog-request-params: routing_id=profiles/prof_qux // -// Example 3 +// # Example 3 // // Extracting a field from the request to put into the routing // header, while matching a path template syntax on the field's value. @@ -116,91 +116,91 @@ const ( // NB: it is more useful to send nothing than to send garbage for the purpose // of dynamic routing, since garbage pollutes cache. Thus the matching. // -// Sub-example 3a +// # Sub-example 3a // // The field matches the template. // // annotation: // -// option (google.api.routing) = { -// // Take the `table_name`, if it's well-formed (with project-based -// // syntax). -// routing_parameters { -// field: "table_name" -// path_template: "{table_name=projects/*/instances/*/**}" -// } -// }; +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed (with project-based +// // syntax). +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=projects/*/instances/*/**}" +// } +// }; // // result: // -// x-goog-request-params: -// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +// x-goog-request-params: +// table_name=projects/proj_foo/instances/instance_bar/table/table_baz // -// Sub-example 3b +// # Sub-example 3b // // The field does not match the template. // // annotation: // -// option (google.api.routing) = { -// // Take the `table_name`, if it's well-formed (with region-based -// // syntax). 
-// routing_parameters { -// field: "table_name" -// path_template: "{table_name=regions/*/zones/*/**}" -// } -// }; +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed (with region-based +// // syntax). +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=regions/*/zones/*/**}" +// } +// }; // // result: // -// +// // -// Sub-example 3c +// # Sub-example 3c // // Multiple alternative conflictingly named path templates are // specified. The one that matches is used to construct the header. // // annotation: // -// option (google.api.routing) = { -// // Take the `table_name`, if it's well-formed, whether -// // using the region- or projects-based syntax. +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed, whether +// // using the region- or projects-based syntax. // -// routing_parameters { -// field: "table_name" -// path_template: "{table_name=regions/*/zones/*/**}" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{table_name=projects/*/instances/*/**}" -// } -// }; +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=regions/*/zones/*/**}" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=projects/*/instances/*/**}" +// } +// }; // // result: // -// x-goog-request-params: -// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +// x-goog-request-params: +// table_name=projects/proj_foo/instances/instance_bar/table/table_baz // -// Example 4 +// # Example 4 // // Extracting a single routing header key-value pair by matching a // template syntax on (a part of) a single request field. // // annotation: // -// option (google.api.routing) = { -// // Take just the project id from the `table_name` field. -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// }; +// option (google.api.routing) = { +// // Take just the project id from the `table_name` field. +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// }; // // result: // -// x-goog-request-params: routing_id=projects/proj_foo +// x-goog-request-params: routing_id=projects/proj_foo // -// Example 5 +// # Example 5 // // Extracting a single routing header key-value pair by matching // several conflictingly named path templates on (parts of) a single request @@ -208,87 +208,87 @@ const ( // // annotation: // -// option (google.api.routing) = { -// // If the `table_name` does not have instances information, -// // take just the project id for routing. -// // Otherwise take project + instance. -// -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*/instances/*}/**" -// } -// }; +// option (google.api.routing) = { +// // If the `table_name` does not have instances information, +// // take just the project id for routing. +// // Otherwise take project + instance. 
+// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*/instances/*}/**" +// } +// }; // // result: // -// x-goog-request-params: -// routing_id=projects/proj_foo/instances/instance_bar +// x-goog-request-params: +// routing_id=projects/proj_foo/instances/instance_bar // -// Example 6 +// # Example 6 // // Extracting multiple routing header key-value pairs by matching // several non-conflicting path templates on (parts of) a single request field. // -// Sub-example 6a +// # Sub-example 6a // // Make the templates strict, so that if the `table_name` does not // have an instance information, nothing is sent. // // annotation: // -// option (google.api.routing) = { -// // The routing code needs two keys instead of one composite -// // but works only for the tables with the "project-instance" name -// // syntax. -// -// routing_parameters { -// field: "table_name" -// path_template: "{project_id=projects/*}/instances/*/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "projects/*/{instance_id=instances/*}/**" -// } -// }; +// option (google.api.routing) = { +// // The routing code needs two keys instead of one composite +// // but works only for the tables with the "project-instance" name +// // syntax. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/instances/*/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; // // result: // -// x-goog-request-params: -// project_id=projects/proj_foo&instance_id=instances/instance_bar +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar // -// Sub-example 6b +// # Sub-example 6b // // Make the templates loose, so that if the `table_name` does not // have an instance information, just the project id part is sent. // // annotation: // -// option (google.api.routing) = { -// // The routing code wants two keys instead of one composite -// // but will work with just the `project_id` for tables without -// // an instance in the `table_name`. -// -// routing_parameters { -// field: "table_name" -// path_template: "{project_id=projects/*}/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "projects/*/{instance_id=instances/*}/**" -// } -// }; +// option (google.api.routing) = { +// // The routing code wants two keys instead of one composite +// // but will work with just the `project_id` for tables without +// // an instance in the `table_name`. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; // // result (is the same as 6a for our example message because it has the instance // information): // -// x-goog-request-params: -// project_id=projects/proj_foo&instance_id=instances/instance_bar +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar // -// Example 7 +// # Example 7 // // Extracting multiple routing header key-value pairs by matching // several path templates on multiple request fields. 
@@ -301,26 +301,26 @@ const ( // // annotation: // -// option (google.api.routing) = { -// // The routing needs both `project_id` and `routing_id` -// // (from the `app_profile_id` field) for routing. +// option (google.api.routing) = { +// // The routing needs both `project_id` and `routing_id` +// // (from the `app_profile_id` field) for routing. // -// routing_parameters { -// field: "table_name" -// path_template: "{project_id=projects/*}/**" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// }; +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; // // result: // -// x-goog-request-params: -// project_id=projects/proj_foo&routing_id=profiles/prof_qux +// x-goog-request-params: +// project_id=projects/proj_foo&routing_id=profiles/prof_qux // -// Example 8 +// # Example 8 // // Extracting a single routing header key-value pair by matching // several conflictingly named path templates on several request fields. The @@ -328,73 +328,73 @@ const ( // // annotation: // -// option (google.api.routing) = { -// // The `routing_id` can be a project id or a region id depending on -// // the table name format, but only if the `app_profile_id` is not set. -// // If `app_profile_id` is set it should be used instead. -// -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=regions/*}/**" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// }; +// option (google.api.routing) = { +// // The `routing_id` can be a project id or a region id depending on +// // the table name format, but only if the `app_profile_id` is not set. +// // If `app_profile_id` is set it should be used instead. +// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=regions/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; // // result: // -// x-goog-request-params: routing_id=profiles/prof_qux +// x-goog-request-params: routing_id=profiles/prof_qux // -// Example 9 +// # Example 9 // // Bringing it all together. // // annotation: // -// option (google.api.routing) = { -// // For routing both `table_location` and a `routing_id` are needed. -// // -// // table_location can be either an instance id or a region+zone id. -// // -// // For `routing_id`, take the value of `app_profile_id` -// // - If it's in the format `profiles/`, send -// // just the `` part. -// // - If it's any other literal, send it as is. -// // If the `app_profile_id` is empty, and the `table_name` starts with -// // the project_id, send that instead. 
-// -// routing_parameters { -// field: "table_name" -// path_template: "projects/*/{table_location=instances/*}/tables/*" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{table_location=regions/*/zones/*}/tables/*" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "profiles/{routing_id=*}" -// } -// }; +// option (google.api.routing) = { +// // For routing both `table_location` and a `routing_id` are needed. +// // +// // table_location can be either an instance id or a region+zone id. +// // +// // For `routing_id`, take the value of `app_profile_id` +// // - If it's in the format `profiles/`, send +// // just the `` part. +// // - If it's any other literal, send it as is. +// // If the `app_profile_id` is empty, and the `table_name` starts with +// // the project_id, send that instead. +// +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{table_location=instances/*}/tables/*" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{table_location=regions/*/zones/*}/tables/*" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "profiles/{routing_id=*}" +// } +// }; // // result: // -// x-goog-request-params: -// table_location=instances/instance_bar&routing_id=prof_qux +// x-goog-request-params: +// table_location=instances/instance_bar&routing_id=prof_qux type RoutingRule struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go new file mode 100644 index 0000000000000..7107531377325 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -0,0 +1,203 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.18.1 +// source: google/api/launch_stage.proto + +package api + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The launch stage as defined by [Google Cloud Platform +// Launch Stages](https://cloud.google.com/terms/launch-stages). 
+type LaunchStage int32 + +const ( + // Do not use this default value. + LaunchStage_LAUNCH_STAGE_UNSPECIFIED LaunchStage = 0 + // The feature is not yet implemented. Users can not use it. + LaunchStage_UNIMPLEMENTED LaunchStage = 6 + // Prelaunch features are hidden from users and are only visible internally. + LaunchStage_PRELAUNCH LaunchStage = 7 + // Early Access features are limited to a closed group of testers. To use + // these features, you must sign up in advance and sign a Trusted Tester + // agreement (which includes confidentiality provisions). These features may + // be unstable, changed in backward-incompatible ways, and are not + // guaranteed to be released. + LaunchStage_EARLY_ACCESS LaunchStage = 1 + // Alpha is a limited availability test for releases before they are cleared + // for widespread use. By Alpha, all significant design issues are resolved + // and we are in the process of verifying functionality. Alpha customers + // need to apply for access, agree to applicable terms, and have their + // projects allowlisted. Alpha releases don't have to be feature complete, + // no SLAs are provided, and there are no technical support obligations, but + // they will be far enough along that customers can actually use them in + // test environments or for limited-use tests -- just like they would in + // normal production cases. + LaunchStage_ALPHA LaunchStage = 2 + // Beta is the point at which we are ready to open a release for any + // customer to use. There are no SLA or technical support obligations in a + // Beta release. Products will be complete from a feature perspective, but + // may have some open outstanding issues. Beta releases are suitable for + // limited production use cases. + LaunchStage_BETA LaunchStage = 3 + // GA features are open to all developers and are considered stable and + // fully qualified for production use. + LaunchStage_GA LaunchStage = 4 + // Deprecated features are scheduled to be shut down and removed. For more + // information, see the "Deprecation Policy" section of our [Terms of + // Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. + LaunchStage_DEPRECATED LaunchStage = 5 +) + +// Enum value maps for LaunchStage. +var ( + LaunchStage_name = map[int32]string{ + 0: "LAUNCH_STAGE_UNSPECIFIED", + 6: "UNIMPLEMENTED", + 7: "PRELAUNCH", + 1: "EARLY_ACCESS", + 2: "ALPHA", + 3: "BETA", + 4: "GA", + 5: "DEPRECATED", + } + LaunchStage_value = map[string]int32{ + "LAUNCH_STAGE_UNSPECIFIED": 0, + "UNIMPLEMENTED": 6, + "PRELAUNCH": 7, + "EARLY_ACCESS": 1, + "ALPHA": 2, + "BETA": 3, + "GA": 4, + "DEPRECATED": 5, + } +) + +func (x LaunchStage) Enum() *LaunchStage { + p := new(LaunchStage) + *p = x + return p +} + +func (x LaunchStage) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LaunchStage) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_launch_stage_proto_enumTypes[0].Descriptor() +} + +func (LaunchStage) Type() protoreflect.EnumType { + return &file_google_api_launch_stage_proto_enumTypes[0] +} + +func (x LaunchStage) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LaunchStage.Descriptor instead. 
+func (LaunchStage) EnumDescriptor() ([]byte, []int) { + return file_google_api_launch_stage_proto_rawDescGZIP(), []int{0} +} + +var File_google_api_launch_stage_proto protoreflect.FileDescriptor + +var file_google_api_launch_stage_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, + 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2a, 0x8c, 0x01, 0x0a, 0x0b, + 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x4c, + 0x41, 0x55, 0x4e, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, + 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x50, 0x52, 0x45, 0x4c, 0x41, 0x55, 0x4e, 0x43, 0x48, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x45, + 0x41, 0x52, 0x4c, 0x59, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, + 0x05, 0x41, 0x4c, 0x50, 0x48, 0x41, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x45, 0x54, 0x41, + 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x41, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, + 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x05, 0x42, 0x5a, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x10, 0x4c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0xa2, + 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_launch_stage_proto_rawDescOnce sync.Once + file_google_api_launch_stage_proto_rawDescData = file_google_api_launch_stage_proto_rawDesc +) + +func file_google_api_launch_stage_proto_rawDescGZIP() []byte { + file_google_api_launch_stage_proto_rawDescOnce.Do(func() { + file_google_api_launch_stage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_launch_stage_proto_rawDescData) + }) + return file_google_api_launch_stage_proto_rawDescData +} + +var file_google_api_launch_stage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_launch_stage_proto_goTypes = []interface{}{ + (LaunchStage)(0), // 0: google.api.LaunchStage +} +var file_google_api_launch_stage_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_launch_stage_proto_init() } +func file_google_api_launch_stage_proto_init() { + if File_google_api_launch_stage_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_launch_stage_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_launch_stage_proto_goTypes, + DependencyIndexes: 
file_google_api_launch_stage_proto_depIdxs, + EnumInfos: file_google_api_launch_stage_proto_enumTypes, + }.Build() + File_google_api_launch_stage_proto = out.File + file_google_api_launch_stage_proto_rawDesc = nil + file_google_api_launch_stage_proto_goTypes = nil + file_google_api_launch_stage_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go index b2d4d51a4f2fe..4170de70fddf6 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.13.0 +// protoc v3.12.2 // source: google/bigtable/admin/v2/bigtable_table_admin.proto package admin @@ -65,6 +65,7 @@ type RestoreTableRequest struct { // Required. The source from which to restore. // // Types that are assignable to Source: + // // *RestoreTableRequest_Backup Source isRestoreTableRequest_Source `protobuf_oneof:"source"` } @@ -156,6 +157,7 @@ type RestoreTableMetadata struct { // `source` in [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. // // Types that are assignable to SourceInfo: + // // *RestoreTableMetadata_BackupInfo SourceInfo isRestoreTableMetadata_SourceInfo `protobuf_oneof:"source_info"` // If exists, the name of the long-running operation that will be used to @@ -341,15 +343,15 @@ type CreateTableRequest struct { // // Example: // - // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` - // `"other", "zz"]` - // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` - // * Key assignment: - // - Tablet 1 `[, apple) => {"a"}.` - // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - // - Tablet 4 `[customer_2, other) => {"customer_2"}.` - // - Tablet 5 `[other, ) => {"other", "zz"}.` + // - Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` + // `"other", "zz"]` + // - initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` + // - Key assignment: + // - Tablet 1 `[, apple) => {"a"}.` + // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` + // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` + // - Tablet 4 `[customer_2, other) => {"customer_2"}.` + // - Tablet 5 `[other, ) => {"other", "zz"}.` InitialSplits []*CreateTableRequest_Split `protobuf:"bytes,4,rep,name=initial_splits,json=initialSplits,proto3" json:"initial_splits,omitempty"` } @@ -505,6 +507,7 @@ type DropRowRangeRequest struct { // Delete all rows or by prefix. // // Types that are assignable to Target: + // // *DropRowRangeRequest_RowKeyPrefix // *DropRowRangeRequest_DeleteAllDataFromTable Target isDropRowRangeRequest_Target `protobuf_oneof:"target"` @@ -799,6 +802,142 @@ func (x *GetTableRequest) GetView() Table_View { return Table_VIEW_UNSPECIFIED } +// The request for +// [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. 
+type UpdateTableRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The table to update. + // The table's `name` field is used to identify the table to update. + Table *Table `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` + // Required. The list of fields to update. + // A mask specifying which fields (e.g. `deletion_protection`) in the `table` + // field should be updated. This mask is relative to the `table` field, not to + // the request message. The wildcard (*) path is currently not supported. + // Currently UpdateTable is only supported for the following field: + // - `deletion_protection` + // + // If `column_families` is set in `update_mask`, it will return an + // UNIMPLEMENTED error. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateTableRequest) Reset() { + *x = UpdateTableRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateTableRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateTableRequest) ProtoMessage() {} + +func (x *UpdateTableRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateTableRequest.ProtoReflect.Descriptor instead. +func (*UpdateTableRequest) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{9} +} + +func (x *UpdateTableRequest) GetTable() *Table { + if x != nil { + return x.Table + } + return nil +} + +func (x *UpdateTableRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Metadata type for the operation returned by +// [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. +type UpdateTableMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the table being updated. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The time at which this operation started. + StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // If set, the time at which this operation finished or was canceled. 
+ EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` +} + +func (x *UpdateTableMetadata) Reset() { + *x = UpdateTableMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateTableMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateTableMetadata) ProtoMessage() {} + +func (x *UpdateTableMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateTableMetadata.ProtoReflect.Descriptor instead. +func (*UpdateTableMetadata) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{10} +} + +func (x *UpdateTableMetadata) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateTableMetadata) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +func (x *UpdateTableMetadata) GetEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EndTime + } + return nil +} + // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] type DeleteTableRequest struct { @@ -815,7 +954,7 @@ type DeleteTableRequest struct { func (x *DeleteTableRequest) Reset() { *x = DeleteTableRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -828,7 +967,7 @@ func (x *DeleteTableRequest) String() string { func (*DeleteTableRequest) ProtoMessage() {} func (x *DeleteTableRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -841,7 +980,7 @@ func (x *DeleteTableRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteTableRequest.ProtoReflect.Descriptor instead. func (*DeleteTableRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{9} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{11} } func (x *DeleteTableRequest) GetName() string { @@ -851,6 +990,126 @@ func (x *DeleteTableRequest) GetName() string { return "" } +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] +type UndeleteTableRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The unique name of the table to be restored. + // Values are of the form + // `projects/{project}/instances/{instance}/tables/{table}`. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *UndeleteTableRequest) Reset() { + *x = UndeleteTableRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UndeleteTableRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UndeleteTableRequest) ProtoMessage() {} + +func (x *UndeleteTableRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UndeleteTableRequest.ProtoReflect.Descriptor instead. +func (*UndeleteTableRequest) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{12} +} + +func (x *UndeleteTableRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Metadata type for the operation returned by +// [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable]. +type UndeleteTableMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the table being restored. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The time at which this operation started. + StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // If set, the time at which this operation finished or was cancelled. + EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` +} + +func (x *UndeleteTableMetadata) Reset() { + *x = UndeleteTableMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UndeleteTableMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UndeleteTableMetadata) ProtoMessage() {} + +func (x *UndeleteTableMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UndeleteTableMetadata.ProtoReflect.Descriptor instead. 
+func (*UndeleteTableMetadata) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{13} +} + +func (x *UndeleteTableMetadata) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UndeleteTableMetadata) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +func (x *UndeleteTableMetadata) GetEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EndTime + } + return nil +} + // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] type ModifyColumnFamiliesRequest struct { @@ -872,7 +1131,7 @@ type ModifyColumnFamiliesRequest struct { func (x *ModifyColumnFamiliesRequest) Reset() { *x = ModifyColumnFamiliesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -885,7 +1144,7 @@ func (x *ModifyColumnFamiliesRequest) String() string { func (*ModifyColumnFamiliesRequest) ProtoMessage() {} func (x *ModifyColumnFamiliesRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -898,7 +1157,7 @@ func (x *ModifyColumnFamiliesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ModifyColumnFamiliesRequest.ProtoReflect.Descriptor instead. func (*ModifyColumnFamiliesRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{10} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{14} } func (x *ModifyColumnFamiliesRequest) GetName() string { @@ -931,7 +1190,7 @@ type GenerateConsistencyTokenRequest struct { func (x *GenerateConsistencyTokenRequest) Reset() { *x = GenerateConsistencyTokenRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -944,7 +1203,7 @@ func (x *GenerateConsistencyTokenRequest) String() string { func (*GenerateConsistencyTokenRequest) ProtoMessage() {} func (x *GenerateConsistencyTokenRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -957,7 +1216,7 @@ func (x *GenerateConsistencyTokenRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GenerateConsistencyTokenRequest.ProtoReflect.Descriptor instead. 
func (*GenerateConsistencyTokenRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{11} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{15} } func (x *GenerateConsistencyTokenRequest) GetName() string { @@ -981,7 +1240,7 @@ type GenerateConsistencyTokenResponse struct { func (x *GenerateConsistencyTokenResponse) Reset() { *x = GenerateConsistencyTokenResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -994,7 +1253,7 @@ func (x *GenerateConsistencyTokenResponse) String() string { func (*GenerateConsistencyTokenResponse) ProtoMessage() {} func (x *GenerateConsistencyTokenResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1007,7 +1266,7 @@ func (x *GenerateConsistencyTokenResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GenerateConsistencyTokenResponse.ProtoReflect.Descriptor instead. func (*GenerateConsistencyTokenResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{12} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{16} } func (x *GenerateConsistencyTokenResponse) GetConsistencyToken() string { @@ -1035,7 +1294,7 @@ type CheckConsistencyRequest struct { func (x *CheckConsistencyRequest) Reset() { *x = CheckConsistencyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1048,7 +1307,7 @@ func (x *CheckConsistencyRequest) String() string { func (*CheckConsistencyRequest) ProtoMessage() {} func (x *CheckConsistencyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1061,7 +1320,7 @@ func (x *CheckConsistencyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CheckConsistencyRequest.ProtoReflect.Descriptor instead. 
func (*CheckConsistencyRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{13} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{17} } func (x *CheckConsistencyRequest) GetName() string { @@ -1093,7 +1352,7 @@ type CheckConsistencyResponse struct { func (x *CheckConsistencyResponse) Reset() { *x = CheckConsistencyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1106,7 +1365,7 @@ func (x *CheckConsistencyResponse) String() string { func (*CheckConsistencyResponse) ProtoMessage() {} func (x *CheckConsistencyResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1119,7 +1378,7 @@ func (x *CheckConsistencyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CheckConsistencyResponse.ProtoReflect.Descriptor instead. func (*CheckConsistencyResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{14} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{18} } func (x *CheckConsistencyResponse) GetConsistent() bool { @@ -1166,7 +1425,7 @@ type SnapshotTableRequest struct { func (x *SnapshotTableRequest) Reset() { *x = SnapshotTableRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1179,7 +1438,7 @@ func (x *SnapshotTableRequest) String() string { func (*SnapshotTableRequest) ProtoMessage() {} func (x *SnapshotTableRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1192,7 +1451,7 @@ func (x *SnapshotTableRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SnapshotTableRequest.ProtoReflect.Descriptor instead. 
func (*SnapshotTableRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{15} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{19} } func (x *SnapshotTableRequest) GetName() string { @@ -1251,7 +1510,7 @@ type GetSnapshotRequest struct { func (x *GetSnapshotRequest) Reset() { *x = GetSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1264,7 +1523,7 @@ func (x *GetSnapshotRequest) String() string { func (*GetSnapshotRequest) ProtoMessage() {} func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1277,7 +1536,7 @@ func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSnapshotRequest.ProtoReflect.Descriptor instead. func (*GetSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{16} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{20} } func (x *GetSnapshotRequest) GetName() string { @@ -1315,7 +1574,7 @@ type ListSnapshotsRequest struct { func (x *ListSnapshotsRequest) Reset() { *x = ListSnapshotsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1328,7 +1587,7 @@ func (x *ListSnapshotsRequest) String() string { func (*ListSnapshotsRequest) ProtoMessage() {} func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1341,7 +1600,7 @@ func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSnapshotsRequest.ProtoReflect.Descriptor instead. 
func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{17} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{21} } func (x *ListSnapshotsRequest) GetParent() string { @@ -1388,7 +1647,7 @@ type ListSnapshotsResponse struct { func (x *ListSnapshotsResponse) Reset() { *x = ListSnapshotsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1401,7 +1660,7 @@ func (x *ListSnapshotsResponse) String() string { func (*ListSnapshotsResponse) ProtoMessage() {} func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1414,7 +1673,7 @@ func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSnapshotsResponse.ProtoReflect.Descriptor instead. func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{18} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{22} } func (x *ListSnapshotsResponse) GetSnapshots() []*Snapshot { @@ -1452,7 +1711,7 @@ type DeleteSnapshotRequest struct { func (x *DeleteSnapshotRequest) Reset() { *x = DeleteSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1465,7 +1724,7 @@ func (x *DeleteSnapshotRequest) String() string { func (*DeleteSnapshotRequest) ProtoMessage() {} func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1478,7 +1737,7 @@ func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteSnapshotRequest.ProtoReflect.Descriptor instead. 
func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{19} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{23} } func (x *DeleteSnapshotRequest) GetName() string { @@ -1510,7 +1769,7 @@ type SnapshotTableMetadata struct { func (x *SnapshotTableMetadata) Reset() { *x = SnapshotTableMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1523,7 +1782,7 @@ func (x *SnapshotTableMetadata) String() string { func (*SnapshotTableMetadata) ProtoMessage() {} func (x *SnapshotTableMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1536,7 +1795,7 @@ func (x *SnapshotTableMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use SnapshotTableMetadata.ProtoReflect.Descriptor instead. func (*SnapshotTableMetadata) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{20} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{24} } func (x *SnapshotTableMetadata) GetOriginalRequest() *SnapshotTableRequest { @@ -1583,7 +1842,7 @@ type CreateTableFromSnapshotMetadata struct { func (x *CreateTableFromSnapshotMetadata) Reset() { *x = CreateTableFromSnapshotMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1596,7 +1855,7 @@ func (x *CreateTableFromSnapshotMetadata) String() string { func (*CreateTableFromSnapshotMetadata) ProtoMessage() {} func (x *CreateTableFromSnapshotMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1609,7 +1868,7 @@ func (x *CreateTableFromSnapshotMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateTableFromSnapshotMetadata.ProtoReflect.Descriptor instead. 
func (*CreateTableFromSnapshotMetadata) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{21} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{25} } func (x *CreateTableFromSnapshotMetadata) GetOriginalRequest() *CreateTableFromSnapshotRequest { @@ -1657,7 +1916,7 @@ type CreateBackupRequest struct { func (x *CreateBackupRequest) Reset() { *x = CreateBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1670,7 +1929,7 @@ func (x *CreateBackupRequest) String() string { func (*CreateBackupRequest) ProtoMessage() {} func (x *CreateBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1683,7 +1942,7 @@ func (x *CreateBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateBackupRequest.ProtoReflect.Descriptor instead. func (*CreateBackupRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{22} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{26} } func (x *CreateBackupRequest) GetParent() string { @@ -1727,7 +1986,7 @@ type CreateBackupMetadata struct { func (x *CreateBackupMetadata) Reset() { *x = CreateBackupMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1740,7 +1999,7 @@ func (x *CreateBackupMetadata) String() string { func (*CreateBackupMetadata) ProtoMessage() {} func (x *CreateBackupMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1753,7 +2012,7 @@ func (x *CreateBackupMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateBackupMetadata.ProtoReflect.Descriptor instead. func (*CreateBackupMetadata) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{23} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{27} } func (x *CreateBackupMetadata) GetName() string { @@ -1793,7 +2052,7 @@ type UpdateBackupRequest struct { // Required. The backup to update. `backup.name`, and the fields to be updated // as specified by `update_mask` are required. Other fields are ignored. // Update is only supported for the following fields: - // * `backup.expire_time`. + // - `backup.expire_time`. Backup *Backup `protobuf:"bytes,1,opt,name=backup,proto3" json:"backup,omitempty"` // Required. A mask specifying which fields (e.g. `expire_time`) in the // Backup resource should be updated. 
This mask is relative to the Backup @@ -1806,7 +2065,7 @@ type UpdateBackupRequest struct { func (x *UpdateBackupRequest) Reset() { *x = UpdateBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1819,7 +2078,7 @@ func (x *UpdateBackupRequest) String() string { func (*UpdateBackupRequest) ProtoMessage() {} func (x *UpdateBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1832,7 +2091,7 @@ func (x *UpdateBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateBackupRequest.ProtoReflect.Descriptor instead. func (*UpdateBackupRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{24} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{28} } func (x *UpdateBackupRequest) GetBackup() *Backup { @@ -1864,7 +2123,7 @@ type GetBackupRequest struct { func (x *GetBackupRequest) Reset() { *x = GetBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1877,7 +2136,7 @@ func (x *GetBackupRequest) String() string { func (*GetBackupRequest) ProtoMessage() {} func (x *GetBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1890,7 +2149,7 @@ func (x *GetBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBackupRequest.ProtoReflect.Descriptor instead. 
func (*GetBackupRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{25} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{29} } func (x *GetBackupRequest) GetName() string { @@ -1915,7 +2174,7 @@ type DeleteBackupRequest struct { func (x *DeleteBackupRequest) Reset() { *x = DeleteBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1928,7 +2187,7 @@ func (x *DeleteBackupRequest) String() string { func (*DeleteBackupRequest) ProtoMessage() {} func (x *DeleteBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1941,7 +2200,7 @@ func (x *DeleteBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteBackupRequest.ProtoReflect.Descriptor instead. func (*DeleteBackupRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{26} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{30} } func (x *DeleteBackupRequest) GetName() string { @@ -1970,13 +2229,13 @@ type ListBackupsRequest struct { // roughly synonymous with equality. Filter rules are case insensitive. // // The fields eligible for filtering are: - // * `name` - // * `source_table` - // * `state` - // * `start_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - // * `end_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - // * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - // * `size_bytes` + // - `name` + // - `source_table` + // - `state` + // - `start_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // - `end_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // - `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // - `size_bytes` // // To filter on multiple expressions, provide each separate expression within // parentheses. By default, each expression is an AND expression. However, @@ -1984,29 +2243,29 @@ type ListBackupsRequest struct { // // Some examples of using filters are: // - // * `name:"exact"` --> The backup's name is the string "exact". - // * `name:howl` --> The backup's name contains the string "howl". - // * `source_table:prod` - // --> The source_table's name contains the string "prod". - // * `state:CREATING` --> The backup is pending creation. - // * `state:READY` --> The backup is fully created and ready for use. - // * `(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")` - // --> The backup name contains the string "howl" and start_time - // of the backup is before 2018-03-28T14:50:00Z. - // * `size_bytes > 10000000000` --> The backup's size is greater than 10GB + // - `name:"exact"` --> The backup's name is the string "exact". + // - `name:howl` --> The backup's name contains the string "howl". + // - `source_table:prod` + // --> The source_table's name contains the string "prod". + // - `state:CREATING` --> The backup is pending creation. 
+ // - `state:READY` --> The backup is fully created and ready for use. + // - `(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")` + // --> The backup name contains the string "howl" and start_time + // of the backup is before 2018-03-28T14:50:00Z. + // - `size_bytes > 10000000000` --> The backup's size is greater than 10GB Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // An expression for specifying the sort order of the results of the request. // The string value should specify one or more fields in [Backup][google.bigtable.admin.v2.Backup]. The full // syntax is described at https://aip.dev/132#ordering. // // Fields supported are: - // * name - // * source_table - // * expire_time - // * start_time - // * end_time - // * size_bytes - // * state + // - name + // - source_table + // - expire_time + // - start_time + // - end_time + // - size_bytes + // - state // // For example, "start_time". The default sorting order is ascending. // To specify descending order for the field, a suffix " desc" should @@ -2029,7 +2288,7 @@ type ListBackupsRequest struct { func (x *ListBackupsRequest) Reset() { *x = ListBackupsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2042,7 +2301,7 @@ func (x *ListBackupsRequest) String() string { func (*ListBackupsRequest) ProtoMessage() {} func (x *ListBackupsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2055,7 +2314,7 @@ func (x *ListBackupsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListBackupsRequest.ProtoReflect.Descriptor instead. func (*ListBackupsRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{27} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{31} } func (x *ListBackupsRequest) GetParent() string { @@ -2110,7 +2369,7 @@ type ListBackupsResponse struct { func (x *ListBackupsResponse) Reset() { *x = ListBackupsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2123,7 +2382,7 @@ func (x *ListBackupsResponse) String() string { func (*ListBackupsResponse) ProtoMessage() {} func (x *ListBackupsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2136,7 +2395,7 @@ func (x *ListBackupsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListBackupsResponse.ProtoReflect.Descriptor instead. 
func (*ListBackupsResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{28} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{32} } func (x *ListBackupsResponse) GetBackups() []*Backup { @@ -2166,7 +2425,7 @@ type CreateTableRequest_Split struct { func (x *CreateTableRequest_Split) Reset() { *x = CreateTableRequest_Split{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2179,7 +2438,7 @@ func (x *CreateTableRequest_Split) String() string { func (*CreateTableRequest_Split) ProtoMessage() {} func (x *CreateTableRequest_Split) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2210,9 +2469,10 @@ type ModifyColumnFamiliesRequest_Modification struct { // The ID of the column family to be modified. Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Column familiy modifications. + // Column family modifications. // // Types that are assignable to Mod: + // // *ModifyColumnFamiliesRequest_Modification_Create // *ModifyColumnFamiliesRequest_Modification_Update // *ModifyColumnFamiliesRequest_Modification_Drop @@ -2222,7 +2482,7 @@ type ModifyColumnFamiliesRequest_Modification struct { func (x *ModifyColumnFamiliesRequest_Modification) Reset() { *x = ModifyColumnFamiliesRequest_Modification{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2235,7 +2495,7 @@ func (x *ModifyColumnFamiliesRequest_Modification) String() string { func (*ModifyColumnFamiliesRequest_Modification) ProtoMessage() {} func (x *ModifyColumnFamiliesRequest_Modification) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2248,7 +2508,7 @@ func (x *ModifyColumnFamiliesRequest_Modification) ProtoReflect() protoreflect.M // Deprecated: Use ModifyColumnFamiliesRequest_Modification.ProtoReflect.Descriptor instead. 
func (*ModifyColumnFamiliesRequest_Modification) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{10, 0} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{14, 0} } func (x *ModifyColumnFamiliesRequest_Modification) GetId() string { @@ -2468,114 +2728,165 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x22, 0x54, 0x0a, 0x12, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x22, 0x8e, 0x03, 0x0a, 0x1b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x6d, 0x0a, 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, - 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x1a, 0xbf, 0x01, 0x0a, 0x0c, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x40, 0x0a, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x48, 
0x00, 0x52, 0x06, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x04, 0x64, 0x72, 0x6f, 0x70, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x64, 0x72, 0x6f, 0x70, 0x42, 0x05, 0x0a, 0x03, - 0x6d, 0x6f, 0x64, 0x22, 0x61, 0x0a, 0x1f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, - 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x4f, 0x0a, 0x20, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, - 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x3a, 0x0a, 0x18, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, - 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x74, 0x22, 0x93, 0x02, 0x0a, 0x14, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, - 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x26, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, + 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x22, 0x92, 0x01, 0x0a, + 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 
0x73, 0x74, 0x12, 0x3a, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, + 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, + 0x6b, 0x22, 0x9b, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, + 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, + 0x54, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x56, 0x0a, 0x14, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x73, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x22, 0x57, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, + 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x9d, 0x01, + 0x0a, 0x15, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x8e, 0x03, + 0x0a, 0x1b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, + 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0x98, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x26, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, - 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x81, 0x01, 0x0a, 0x15, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x26, 
0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, - 0x5a, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6d, 0x0a, + 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, + 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x6f, 0x64, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x6d, + 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xbf, 0x01, 0x0a, + 0x0c, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x40, 0x0a, + 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, + 0x40, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x48, 0x00, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x12, 0x14, 0x0a, 0x04, 0x64, 0x72, 0x6f, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x00, 0x52, 0x04, 0x64, 0x72, 0x6f, 0x70, 0x42, 0x05, 0x0a, 0x03, 0x6d, 0x6f, 0x64, 0x22, 0x61, + 0x0a, 0x1f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x22, 0x4f, 0x0a, 0x20, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, + 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x12, 0x3e, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, + 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x10, + 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x3a, 0x0a, 0x18, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x93, 0x02, 0x0a, + 0x14, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x0a, 0x24, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, + 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x57, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xee, 0x01, 0x0a, 0x15, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x59, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, - 0x6c, 0x5f, 
0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x98, 0x01, 0x0a, 0x14, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x0a, 0x24, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, + 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x40, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5a, 0x0a, 0x15, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xee, 0x01, 0x0a, 0x15, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x59, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x66, 0x69, + 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x66, 0x69, 0x6e, + 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x82, 0x02, 0x0a, 0x1f, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x63, 0x0a, 0x10, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, @@ -2584,401 +2895,410 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x3b, 0x0a, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x0a, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x82, 0x02, 0x0a, - 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x63, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, - 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x26, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, - 0x20, 0x0a, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, - 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x22, 0xbf, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, - 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, - 0x6d, 0x65, 0x22, 0x96, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x53, 0x0a, 0x10, 
0x47, - 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2b, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0x56, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, - 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, - 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, - 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x79, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, + 0x52, 0x0a, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xbc, 0x01, 0x0a, + 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x0a, 0x24, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x06, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x07, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, - 0x98, 0x25, 0x0a, 0x12, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xab, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x4d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x22, 0x2a, 0x2f, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0xbf, 0x01, 0x0a, 0x14, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x96, 0x01, + 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 
0x53, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, + 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x56, 0x0a, 0x13, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x26, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, + 0x72, 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, + 0x72, 0x42, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x79, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xb2, 0x28, 0x0a, 0x12, 0x42, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x64, 0x6d, 0x69, + 0x6e, 0x12, 0xab, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x22, 0x4d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x22, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x15, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, + 0x8a, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, + 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x38, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, + 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x95, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x22, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x15, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x12, 0x8a, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, - 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x95, 0x01, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x42, 0x22, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, + 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0xda, + 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, + 0x64, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0xca, 0x41, 0x28, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1f, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa4, 0x01, 0x0a, + 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, + 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x39, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0xca, 0x41, 0x28, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x12, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, - 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0xa4, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0xda, - 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x08, 0x47, 0x65, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 
0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8e, 0x01, 0x0a, - 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, 0x2a, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xcf, 0x01, - 0x0a, 0x14, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, - 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, - 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x5f, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x22, 0x3f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, + 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xce, 0x01, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, + 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x39, 0x32, 0x30, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x05, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0xda, 0x41, 0x11, 
0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xca, 0x41, 0x1c, 0x0a, 0x05, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x12, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x8e, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x39, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, - 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x12, 0x6e, 0x61, 0x6d, - 0x65, 0x2c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x99, 0x01, 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x72, 0x6f, 0x70, - 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x42, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x22, - 0x37, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x72, 0x6f, 0x70, - 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xe8, 0x01, 0x0a, 0x18, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc6, 0x01, 0x0a, 0x0d, 0x55, 0x6e, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x66, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x38, 0x22, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 
0x7d, 0x3a, 0x75, + 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0xca, 0x41, 0x1e, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x15, 0x55, 0x6e, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0xcf, 0x01, 0x0a, 0x14, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x22, 0x3f, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, + 0x41, 0x12, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x99, 0x01, 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x42, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x3c, 0x22, 0x37, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, + 0x3a, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x3a, 0x01, 0x2a, + 0x12, 0xe8, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, + 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x39, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 
0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x55, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x48, 0x22, 0x43, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x3a, 0x01, 0x2a, 0xda, - 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xda, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x31, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, - 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x16, 0x6e, 0x61, 0x6d, - 0x65, 0x2c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x12, 0xea, 0x01, 0x0a, 0x0d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x55, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x48, 0x22, 0x43, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xda, 0x01, 0x0a, 0x10, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 
0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, + 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, + 0x41, 0x16, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x63, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0xea, 0x01, 0x0a, 0x0d, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x89, 0x01, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x38, 0x22, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x24, 0x6e, 0x61, 0x6d, + 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0xca, 0x41, 0x21, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x15, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa8, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, - 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x89, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x22, 0x33, 0x2f, - 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, + 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 
0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0xbb, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x49, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, 0x38, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x24, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x2c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, - 0x2c, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0xca, 0x41, 0x21, 0x0a, - 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x15, 0x53, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0xa8, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, 0x38, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xbb, 0x01, 0x0a, 0x0d, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x22, 0x49, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, - 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xa2, 0x01, 0x0a, 0x0e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x2a, 0x38, 0x2f, - 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xa2, + 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x3a, 0x2a, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0xe0, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, + 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x81, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x36, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xe0, - 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, - 0x2d, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, - 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x81, 0x01, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x3a, 0x06, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0xca, 0x41, 0x1e, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x14, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0xa0, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, - 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x45, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc3, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, 0x17, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x2c, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0xca, 0x41, 0x1e, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x12, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa0, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x61, 0x63, 0x6b, 0x75, 
0x70, 0x22, 0x62, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x32, 0x3d, - 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x06, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, 0x12, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2c, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x9c, 0x01, 0x0a, 0x0c, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x2a, 0x36, 0x2f, 0x76, 0x32, 0x2f, + 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x22, 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, - 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb3, 0x01, 0x0a, 0x0b, 0x4c, 0x69, - 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc3, 0x01, 0x0a, 0x0c, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, - 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0xda, 0x41, 0x06, 0x70, 
0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, - 0xbb, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x62, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x47, 0x32, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, 0x12, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, + 0x9c, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, - 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5d, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x01, 0x2a, 0xca, 0x41, 0x1d, - 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xec, 0x01, - 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, - 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa0, 0x01, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x2a, + 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 
0x73, 0x2f, + 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb3, + 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0xbb, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, + 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x5d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, + 0x01, 0x2a, 0xca, 0x41, 0x1d, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x14, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0xec, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa0, + 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 
0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0xf3, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa7, 0x01, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, 0x7b, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xa4, 0x02, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, + 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, + 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9a, 0x01, 0x22, 0x41, 0x2f, + 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, + 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x3a, 0x01, 0x2a, 0x5a, 0x52, 0x22, 0x4d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, - 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xf3, 0x01, 0x0a, - 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, - 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, - 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa7, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, - 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, - 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, - 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0xa4, 0x02, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, - 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, - 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, - 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9a, 0x01, 0x22, 0x41, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, - 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x5a, 0x52, - 0x22, 0x4d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, - 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, - 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, - 0x01, 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xde, 0x02, 0xca, 0x41, 0x1c, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 
0xd2, 0x41, 0xbb, 0x02, 0x68, - 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, - 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xde, + 0x02, 0xca, 0x41, 0x1c, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0xd2, 0x41, 0xbb, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, - 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, - 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, - 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, - 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xdf, 0x01, 0x0a, 0x1c, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x17, 0x42, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 
0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, - 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, + 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, + 0xdf, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x42, 0x17, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, + 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2993,7 +3313,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP() []by return 
file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescData } -var file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 31) +var file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 35) var file_google_bigtable_admin_v2_bigtable_table_admin_proto_goTypes = []interface{}{ (*RestoreTableRequest)(nil), // 0: google.bigtable.admin.v2.RestoreTableRequest (*RestoreTableMetadata)(nil), // 1: google.bigtable.admin.v2.RestoreTableMetadata @@ -3004,123 +3324,137 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_goTypes = []interfa (*ListTablesRequest)(nil), // 6: google.bigtable.admin.v2.ListTablesRequest (*ListTablesResponse)(nil), // 7: google.bigtable.admin.v2.ListTablesResponse (*GetTableRequest)(nil), // 8: google.bigtable.admin.v2.GetTableRequest - (*DeleteTableRequest)(nil), // 9: google.bigtable.admin.v2.DeleteTableRequest - (*ModifyColumnFamiliesRequest)(nil), // 10: google.bigtable.admin.v2.ModifyColumnFamiliesRequest - (*GenerateConsistencyTokenRequest)(nil), // 11: google.bigtable.admin.v2.GenerateConsistencyTokenRequest - (*GenerateConsistencyTokenResponse)(nil), // 12: google.bigtable.admin.v2.GenerateConsistencyTokenResponse - (*CheckConsistencyRequest)(nil), // 13: google.bigtable.admin.v2.CheckConsistencyRequest - (*CheckConsistencyResponse)(nil), // 14: google.bigtable.admin.v2.CheckConsistencyResponse - (*SnapshotTableRequest)(nil), // 15: google.bigtable.admin.v2.SnapshotTableRequest - (*GetSnapshotRequest)(nil), // 16: google.bigtable.admin.v2.GetSnapshotRequest - (*ListSnapshotsRequest)(nil), // 17: google.bigtable.admin.v2.ListSnapshotsRequest - (*ListSnapshotsResponse)(nil), // 18: google.bigtable.admin.v2.ListSnapshotsResponse - (*DeleteSnapshotRequest)(nil), // 19: google.bigtable.admin.v2.DeleteSnapshotRequest - (*SnapshotTableMetadata)(nil), // 20: google.bigtable.admin.v2.SnapshotTableMetadata - (*CreateTableFromSnapshotMetadata)(nil), // 21: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata - (*CreateBackupRequest)(nil), // 22: google.bigtable.admin.v2.CreateBackupRequest - (*CreateBackupMetadata)(nil), // 23: google.bigtable.admin.v2.CreateBackupMetadata - (*UpdateBackupRequest)(nil), // 24: google.bigtable.admin.v2.UpdateBackupRequest - (*GetBackupRequest)(nil), // 25: google.bigtable.admin.v2.GetBackupRequest - (*DeleteBackupRequest)(nil), // 26: google.bigtable.admin.v2.DeleteBackupRequest - (*ListBackupsRequest)(nil), // 27: google.bigtable.admin.v2.ListBackupsRequest - (*ListBackupsResponse)(nil), // 28: google.bigtable.admin.v2.ListBackupsResponse - (*CreateTableRequest_Split)(nil), // 29: google.bigtable.admin.v2.CreateTableRequest.Split - (*ModifyColumnFamiliesRequest_Modification)(nil), // 30: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification - (RestoreSourceType)(0), // 31: google.bigtable.admin.v2.RestoreSourceType - (*BackupInfo)(nil), // 32: google.bigtable.admin.v2.BackupInfo - (*OperationProgress)(nil), // 33: google.bigtable.admin.v2.OperationProgress - (*Table)(nil), // 34: google.bigtable.admin.v2.Table - (Table_View)(0), // 35: google.bigtable.admin.v2.Table.View - (*durationpb.Duration)(nil), // 36: google.protobuf.Duration - (*Snapshot)(nil), // 37: google.bigtable.admin.v2.Snapshot - (*timestamppb.Timestamp)(nil), // 38: google.protobuf.Timestamp - (*Backup)(nil), // 39: google.bigtable.admin.v2.Backup + (*UpdateTableRequest)(nil), // 9: google.bigtable.admin.v2.UpdateTableRequest + (*UpdateTableMetadata)(nil), // 
10: google.bigtable.admin.v2.UpdateTableMetadata + (*DeleteTableRequest)(nil), // 11: google.bigtable.admin.v2.DeleteTableRequest + (*UndeleteTableRequest)(nil), // 12: google.bigtable.admin.v2.UndeleteTableRequest + (*UndeleteTableMetadata)(nil), // 13: google.bigtable.admin.v2.UndeleteTableMetadata + (*ModifyColumnFamiliesRequest)(nil), // 14: google.bigtable.admin.v2.ModifyColumnFamiliesRequest + (*GenerateConsistencyTokenRequest)(nil), // 15: google.bigtable.admin.v2.GenerateConsistencyTokenRequest + (*GenerateConsistencyTokenResponse)(nil), // 16: google.bigtable.admin.v2.GenerateConsistencyTokenResponse + (*CheckConsistencyRequest)(nil), // 17: google.bigtable.admin.v2.CheckConsistencyRequest + (*CheckConsistencyResponse)(nil), // 18: google.bigtable.admin.v2.CheckConsistencyResponse + (*SnapshotTableRequest)(nil), // 19: google.bigtable.admin.v2.SnapshotTableRequest + (*GetSnapshotRequest)(nil), // 20: google.bigtable.admin.v2.GetSnapshotRequest + (*ListSnapshotsRequest)(nil), // 21: google.bigtable.admin.v2.ListSnapshotsRequest + (*ListSnapshotsResponse)(nil), // 22: google.bigtable.admin.v2.ListSnapshotsResponse + (*DeleteSnapshotRequest)(nil), // 23: google.bigtable.admin.v2.DeleteSnapshotRequest + (*SnapshotTableMetadata)(nil), // 24: google.bigtable.admin.v2.SnapshotTableMetadata + (*CreateTableFromSnapshotMetadata)(nil), // 25: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata + (*CreateBackupRequest)(nil), // 26: google.bigtable.admin.v2.CreateBackupRequest + (*CreateBackupMetadata)(nil), // 27: google.bigtable.admin.v2.CreateBackupMetadata + (*UpdateBackupRequest)(nil), // 28: google.bigtable.admin.v2.UpdateBackupRequest + (*GetBackupRequest)(nil), // 29: google.bigtable.admin.v2.GetBackupRequest + (*DeleteBackupRequest)(nil), // 30: google.bigtable.admin.v2.DeleteBackupRequest + (*ListBackupsRequest)(nil), // 31: google.bigtable.admin.v2.ListBackupsRequest + (*ListBackupsResponse)(nil), // 32: google.bigtable.admin.v2.ListBackupsResponse + (*CreateTableRequest_Split)(nil), // 33: google.bigtable.admin.v2.CreateTableRequest.Split + (*ModifyColumnFamiliesRequest_Modification)(nil), // 34: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification + (RestoreSourceType)(0), // 35: google.bigtable.admin.v2.RestoreSourceType + (*BackupInfo)(nil), // 36: google.bigtable.admin.v2.BackupInfo + (*OperationProgress)(nil), // 37: google.bigtable.admin.v2.OperationProgress + (*Table)(nil), // 38: google.bigtable.admin.v2.Table + (Table_View)(0), // 39: google.bigtable.admin.v2.Table.View (*fieldmaskpb.FieldMask)(nil), // 40: google.protobuf.FieldMask - (*ColumnFamily)(nil), // 41: google.bigtable.admin.v2.ColumnFamily - (*v1.GetIamPolicyRequest)(nil), // 42: google.iam.v1.GetIamPolicyRequest - (*v1.SetIamPolicyRequest)(nil), // 43: google.iam.v1.SetIamPolicyRequest - (*v1.TestIamPermissionsRequest)(nil), // 44: google.iam.v1.TestIamPermissionsRequest - (*longrunning.Operation)(nil), // 45: google.longrunning.Operation - (*emptypb.Empty)(nil), // 46: google.protobuf.Empty - (*v1.Policy)(nil), // 47: google.iam.v1.Policy - (*v1.TestIamPermissionsResponse)(nil), // 48: google.iam.v1.TestIamPermissionsResponse + (*timestamppb.Timestamp)(nil), // 41: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 42: google.protobuf.Duration + (*Snapshot)(nil), // 43: google.bigtable.admin.v2.Snapshot + (*Backup)(nil), // 44: google.bigtable.admin.v2.Backup + (*ColumnFamily)(nil), // 45: google.bigtable.admin.v2.ColumnFamily + (*v1.GetIamPolicyRequest)(nil), // 46: 
google.iam.v1.GetIamPolicyRequest + (*v1.SetIamPolicyRequest)(nil), // 47: google.iam.v1.SetIamPolicyRequest + (*v1.TestIamPermissionsRequest)(nil), // 48: google.iam.v1.TestIamPermissionsRequest + (*longrunning.Operation)(nil), // 49: google.longrunning.Operation + (*emptypb.Empty)(nil), // 50: google.protobuf.Empty + (*v1.Policy)(nil), // 51: google.iam.v1.Policy + (*v1.TestIamPermissionsResponse)(nil), // 52: google.iam.v1.TestIamPermissionsResponse } var file_google_bigtable_admin_v2_bigtable_table_admin_proto_depIdxs = []int32{ - 31, // 0: google.bigtable.admin.v2.RestoreTableMetadata.source_type:type_name -> google.bigtable.admin.v2.RestoreSourceType - 32, // 1: google.bigtable.admin.v2.RestoreTableMetadata.backup_info:type_name -> google.bigtable.admin.v2.BackupInfo - 33, // 2: google.bigtable.admin.v2.RestoreTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress - 33, // 3: google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress - 34, // 4: google.bigtable.admin.v2.CreateTableRequest.table:type_name -> google.bigtable.admin.v2.Table - 29, // 5: google.bigtable.admin.v2.CreateTableRequest.initial_splits:type_name -> google.bigtable.admin.v2.CreateTableRequest.Split - 35, // 6: google.bigtable.admin.v2.ListTablesRequest.view:type_name -> google.bigtable.admin.v2.Table.View - 34, // 7: google.bigtable.admin.v2.ListTablesResponse.tables:type_name -> google.bigtable.admin.v2.Table - 35, // 8: google.bigtable.admin.v2.GetTableRequest.view:type_name -> google.bigtable.admin.v2.Table.View - 30, // 9: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications:type_name -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification - 36, // 10: google.bigtable.admin.v2.SnapshotTableRequest.ttl:type_name -> google.protobuf.Duration - 37, // 11: google.bigtable.admin.v2.ListSnapshotsResponse.snapshots:type_name -> google.bigtable.admin.v2.Snapshot - 15, // 12: google.bigtable.admin.v2.SnapshotTableMetadata.original_request:type_name -> google.bigtable.admin.v2.SnapshotTableRequest - 38, // 13: google.bigtable.admin.v2.SnapshotTableMetadata.request_time:type_name -> google.protobuf.Timestamp - 38, // 14: google.bigtable.admin.v2.SnapshotTableMetadata.finish_time:type_name -> google.protobuf.Timestamp - 4, // 15: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request:type_name -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest - 38, // 16: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time:type_name -> google.protobuf.Timestamp - 38, // 17: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time:type_name -> google.protobuf.Timestamp - 39, // 18: google.bigtable.admin.v2.CreateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup - 38, // 19: google.bigtable.admin.v2.CreateBackupMetadata.start_time:type_name -> google.protobuf.Timestamp - 38, // 20: google.bigtable.admin.v2.CreateBackupMetadata.end_time:type_name -> google.protobuf.Timestamp - 39, // 21: google.bigtable.admin.v2.UpdateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup - 40, // 22: google.bigtable.admin.v2.UpdateBackupRequest.update_mask:type_name -> google.protobuf.FieldMask - 39, // 23: google.bigtable.admin.v2.ListBackupsResponse.backups:type_name -> google.bigtable.admin.v2.Backup - 41, // 24: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create:type_name -> google.bigtable.admin.v2.ColumnFamily - 41, // 25: 
google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update:type_name -> google.bigtable.admin.v2.ColumnFamily - 3, // 26: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:input_type -> google.bigtable.admin.v2.CreateTableRequest - 4, // 27: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:input_type -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest - 6, // 28: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:input_type -> google.bigtable.admin.v2.ListTablesRequest - 8, // 29: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:input_type -> google.bigtable.admin.v2.GetTableRequest - 9, // 30: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:input_type -> google.bigtable.admin.v2.DeleteTableRequest - 10, // 31: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:input_type -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest - 5, // 32: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:input_type -> google.bigtable.admin.v2.DropRowRangeRequest - 11, // 33: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:input_type -> google.bigtable.admin.v2.GenerateConsistencyTokenRequest - 13, // 34: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:input_type -> google.bigtable.admin.v2.CheckConsistencyRequest - 15, // 35: google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:input_type -> google.bigtable.admin.v2.SnapshotTableRequest - 16, // 36: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:input_type -> google.bigtable.admin.v2.GetSnapshotRequest - 17, // 37: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:input_type -> google.bigtable.admin.v2.ListSnapshotsRequest - 19, // 38: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:input_type -> google.bigtable.admin.v2.DeleteSnapshotRequest - 22, // 39: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:input_type -> google.bigtable.admin.v2.CreateBackupRequest - 25, // 40: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:input_type -> google.bigtable.admin.v2.GetBackupRequest - 24, // 41: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:input_type -> google.bigtable.admin.v2.UpdateBackupRequest - 26, // 42: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:input_type -> google.bigtable.admin.v2.DeleteBackupRequest - 27, // 43: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:input_type -> google.bigtable.admin.v2.ListBackupsRequest - 0, // 44: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:input_type -> google.bigtable.admin.v2.RestoreTableRequest - 42, // 45: google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 43, // 46: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 44, // 47: google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 34, // 48: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:output_type -> google.bigtable.admin.v2.Table - 45, // 49: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:output_type -> google.longrunning.Operation - 7, // 50: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:output_type -> google.bigtable.admin.v2.ListTablesResponse - 34, // 51: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:output_type -> google.bigtable.admin.v2.Table - 46, // 52: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:output_type -> 
google.protobuf.Empty - 34, // 53: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:output_type -> google.bigtable.admin.v2.Table - 46, // 54: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:output_type -> google.protobuf.Empty - 12, // 55: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:output_type -> google.bigtable.admin.v2.GenerateConsistencyTokenResponse - 14, // 56: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:output_type -> google.bigtable.admin.v2.CheckConsistencyResponse - 45, // 57: google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:output_type -> google.longrunning.Operation - 37, // 58: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:output_type -> google.bigtable.admin.v2.Snapshot - 18, // 59: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:output_type -> google.bigtable.admin.v2.ListSnapshotsResponse - 46, // 60: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:output_type -> google.protobuf.Empty - 45, // 61: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:output_type -> google.longrunning.Operation - 39, // 62: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:output_type -> google.bigtable.admin.v2.Backup - 39, // 63: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:output_type -> google.bigtable.admin.v2.Backup - 46, // 64: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:output_type -> google.protobuf.Empty - 28, // 65: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:output_type -> google.bigtable.admin.v2.ListBackupsResponse - 45, // 66: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:output_type -> google.longrunning.Operation - 47, // 67: google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:output_type -> google.iam.v1.Policy - 47, // 68: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:output_type -> google.iam.v1.Policy - 48, // 69: google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 48, // [48:70] is the sub-list for method output_type - 26, // [26:48] is the sub-list for method input_type - 26, // [26:26] is the sub-list for extension type_name - 26, // [26:26] is the sub-list for extension extendee - 0, // [0:26] is the sub-list for field type_name + 35, // 0: google.bigtable.admin.v2.RestoreTableMetadata.source_type:type_name -> google.bigtable.admin.v2.RestoreSourceType + 36, // 1: google.bigtable.admin.v2.RestoreTableMetadata.backup_info:type_name -> google.bigtable.admin.v2.BackupInfo + 37, // 2: google.bigtable.admin.v2.RestoreTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress + 37, // 3: google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress + 38, // 4: google.bigtable.admin.v2.CreateTableRequest.table:type_name -> google.bigtable.admin.v2.Table + 33, // 5: google.bigtable.admin.v2.CreateTableRequest.initial_splits:type_name -> google.bigtable.admin.v2.CreateTableRequest.Split + 39, // 6: google.bigtable.admin.v2.ListTablesRequest.view:type_name -> google.bigtable.admin.v2.Table.View + 38, // 7: google.bigtable.admin.v2.ListTablesResponse.tables:type_name -> google.bigtable.admin.v2.Table + 39, // 8: google.bigtable.admin.v2.GetTableRequest.view:type_name -> google.bigtable.admin.v2.Table.View + 38, // 9: google.bigtable.admin.v2.UpdateTableRequest.table:type_name -> google.bigtable.admin.v2.Table + 40, // 10: 
google.bigtable.admin.v2.UpdateTableRequest.update_mask:type_name -> google.protobuf.FieldMask + 41, // 11: google.bigtable.admin.v2.UpdateTableMetadata.start_time:type_name -> google.protobuf.Timestamp + 41, // 12: google.bigtable.admin.v2.UpdateTableMetadata.end_time:type_name -> google.protobuf.Timestamp + 41, // 13: google.bigtable.admin.v2.UndeleteTableMetadata.start_time:type_name -> google.protobuf.Timestamp + 41, // 14: google.bigtable.admin.v2.UndeleteTableMetadata.end_time:type_name -> google.protobuf.Timestamp + 34, // 15: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications:type_name -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification + 42, // 16: google.bigtable.admin.v2.SnapshotTableRequest.ttl:type_name -> google.protobuf.Duration + 43, // 17: google.bigtable.admin.v2.ListSnapshotsResponse.snapshots:type_name -> google.bigtable.admin.v2.Snapshot + 19, // 18: google.bigtable.admin.v2.SnapshotTableMetadata.original_request:type_name -> google.bigtable.admin.v2.SnapshotTableRequest + 41, // 19: google.bigtable.admin.v2.SnapshotTableMetadata.request_time:type_name -> google.protobuf.Timestamp + 41, // 20: google.bigtable.admin.v2.SnapshotTableMetadata.finish_time:type_name -> google.protobuf.Timestamp + 4, // 21: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request:type_name -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest + 41, // 22: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time:type_name -> google.protobuf.Timestamp + 41, // 23: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time:type_name -> google.protobuf.Timestamp + 44, // 24: google.bigtable.admin.v2.CreateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup + 41, // 25: google.bigtable.admin.v2.CreateBackupMetadata.start_time:type_name -> google.protobuf.Timestamp + 41, // 26: google.bigtable.admin.v2.CreateBackupMetadata.end_time:type_name -> google.protobuf.Timestamp + 44, // 27: google.bigtable.admin.v2.UpdateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup + 40, // 28: google.bigtable.admin.v2.UpdateBackupRequest.update_mask:type_name -> google.protobuf.FieldMask + 44, // 29: google.bigtable.admin.v2.ListBackupsResponse.backups:type_name -> google.bigtable.admin.v2.Backup + 45, // 30: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create:type_name -> google.bigtable.admin.v2.ColumnFamily + 45, // 31: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update:type_name -> google.bigtable.admin.v2.ColumnFamily + 3, // 32: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:input_type -> google.bigtable.admin.v2.CreateTableRequest + 4, // 33: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:input_type -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest + 6, // 34: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:input_type -> google.bigtable.admin.v2.ListTablesRequest + 8, // 35: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:input_type -> google.bigtable.admin.v2.GetTableRequest + 9, // 36: google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable:input_type -> google.bigtable.admin.v2.UpdateTableRequest + 11, // 37: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:input_type -> google.bigtable.admin.v2.DeleteTableRequest + 12, // 38: google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable:input_type -> google.bigtable.admin.v2.UndeleteTableRequest + 14, // 39: 
google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:input_type -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest + 5, // 40: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:input_type -> google.bigtable.admin.v2.DropRowRangeRequest + 15, // 41: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:input_type -> google.bigtable.admin.v2.GenerateConsistencyTokenRequest + 17, // 42: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:input_type -> google.bigtable.admin.v2.CheckConsistencyRequest + 19, // 43: google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:input_type -> google.bigtable.admin.v2.SnapshotTableRequest + 20, // 44: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:input_type -> google.bigtable.admin.v2.GetSnapshotRequest + 21, // 45: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:input_type -> google.bigtable.admin.v2.ListSnapshotsRequest + 23, // 46: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:input_type -> google.bigtable.admin.v2.DeleteSnapshotRequest + 26, // 47: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:input_type -> google.bigtable.admin.v2.CreateBackupRequest + 29, // 48: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:input_type -> google.bigtable.admin.v2.GetBackupRequest + 28, // 49: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:input_type -> google.bigtable.admin.v2.UpdateBackupRequest + 30, // 50: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:input_type -> google.bigtable.admin.v2.DeleteBackupRequest + 31, // 51: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:input_type -> google.bigtable.admin.v2.ListBackupsRequest + 0, // 52: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:input_type -> google.bigtable.admin.v2.RestoreTableRequest + 46, // 53: google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 47, // 54: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 48, // 55: google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 38, // 56: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:output_type -> google.bigtable.admin.v2.Table + 49, // 57: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:output_type -> google.longrunning.Operation + 7, // 58: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:output_type -> google.bigtable.admin.v2.ListTablesResponse + 38, // 59: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:output_type -> google.bigtable.admin.v2.Table + 49, // 60: google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable:output_type -> google.longrunning.Operation + 50, // 61: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:output_type -> google.protobuf.Empty + 49, // 62: google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable:output_type -> google.longrunning.Operation + 38, // 63: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:output_type -> google.bigtable.admin.v2.Table + 50, // 64: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:output_type -> google.protobuf.Empty + 16, // 65: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:output_type -> google.bigtable.admin.v2.GenerateConsistencyTokenResponse + 18, // 66: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:output_type -> google.bigtable.admin.v2.CheckConsistencyResponse + 49, // 67: 
google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:output_type -> google.longrunning.Operation + 43, // 68: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:output_type -> google.bigtable.admin.v2.Snapshot + 22, // 69: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:output_type -> google.bigtable.admin.v2.ListSnapshotsResponse + 50, // 70: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:output_type -> google.protobuf.Empty + 49, // 71: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:output_type -> google.longrunning.Operation + 44, // 72: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:output_type -> google.bigtable.admin.v2.Backup + 44, // 73: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:output_type -> google.bigtable.admin.v2.Backup + 50, // 74: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:output_type -> google.protobuf.Empty + 32, // 75: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:output_type -> google.bigtable.admin.v2.ListBackupsResponse + 49, // 76: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:output_type -> google.longrunning.Operation + 51, // 77: google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:output_type -> google.iam.v1.Policy + 51, // 78: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:output_type -> google.iam.v1.Policy + 52, // 79: google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 56, // [56:80] is the sub-list for method output_type + 32, // [32:56] is the sub-list for method input_type + 32, // [32:32] is the sub-list for extension type_name + 32, // [32:32] is the sub-list for extension extendee + 0, // [0:32] is the sub-list for field type_name } func init() { file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() } @@ -3240,7 +3574,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteTableRequest); i { + switch v := v.(*UpdateTableRequest); i { case 0: return &v.state case 1: @@ -3252,7 +3586,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ModifyColumnFamiliesRequest); i { + switch v := v.(*UpdateTableMetadata); i { case 0: return &v.state case 1: @@ -3264,7 +3598,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GenerateConsistencyTokenRequest); i { + switch v := v.(*DeleteTableRequest); i { case 0: return &v.state case 1: @@ -3276,7 +3610,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GenerateConsistencyTokenResponse); i { + switch v := v.(*UndeleteTableRequest); i { case 0: return &v.state case 1: @@ -3288,7 +3622,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckConsistencyRequest); i { + switch v := v.(*UndeleteTableMetadata); i 
{ case 0: return &v.state case 1: @@ -3300,7 +3634,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckConsistencyResponse); i { + switch v := v.(*ModifyColumnFamiliesRequest); i { case 0: return &v.state case 1: @@ -3312,7 +3646,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SnapshotTableRequest); i { + switch v := v.(*GenerateConsistencyTokenRequest); i { case 0: return &v.state case 1: @@ -3324,7 +3658,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSnapshotRequest); i { + switch v := v.(*GenerateConsistencyTokenResponse); i { case 0: return &v.state case 1: @@ -3336,7 +3670,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListSnapshotsRequest); i { + switch v := v.(*CheckConsistencyRequest); i { case 0: return &v.state case 1: @@ -3348,7 +3682,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListSnapshotsResponse); i { + switch v := v.(*CheckConsistencyResponse); i { case 0: return &v.state case 1: @@ -3360,7 +3694,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteSnapshotRequest); i { + switch v := v.(*SnapshotTableRequest); i { case 0: return &v.state case 1: @@ -3372,7 +3706,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SnapshotTableMetadata); i { + switch v := v.(*GetSnapshotRequest); i { case 0: return &v.state case 1: @@ -3384,7 +3718,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTableFromSnapshotMetadata); i { + switch v := v.(*ListSnapshotsRequest); i { case 0: return &v.state case 1: @@ -3396,7 +3730,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateBackupRequest); i { + switch v := v.(*ListSnapshotsResponse); i { case 0: return &v.state case 1: @@ -3408,7 +3742,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateBackupMetadata); i { + switch v := v.(*DeleteSnapshotRequest); i { case 0: return &v.state case 1: @@ -3420,7 +3754,7 @@ func 
file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateBackupRequest); i { + switch v := v.(*SnapshotTableMetadata); i { case 0: return &v.state case 1: @@ -3432,7 +3766,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBackupRequest); i { + switch v := v.(*CreateTableFromSnapshotMetadata); i { case 0: return &v.state case 1: @@ -3444,7 +3778,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteBackupRequest); i { + switch v := v.(*CreateBackupRequest); i { case 0: return &v.state case 1: @@ -3456,7 +3790,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListBackupsRequest); i { + switch v := v.(*CreateBackupMetadata); i { case 0: return &v.state case 1: @@ -3468,7 +3802,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListBackupsResponse); i { + switch v := v.(*UpdateBackupRequest); i { case 0: return &v.state case 1: @@ -3480,7 +3814,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTableRequest_Split); i { + switch v := v.(*GetBackupRequest); i { case 0: return &v.state case 1: @@ -3492,6 +3826,54 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteBackupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListBackupsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListBackupsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateTableRequest_Split); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ModifyColumnFamiliesRequest_Modification); i { case 0: return &v.state @@ -3514,7 +3896,7 @@ func 
file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { (*DropRowRangeRequest_RowKeyPrefix)(nil), (*DropRowRangeRequest_DeleteAllDataFromTable)(nil), } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34].OneofWrappers = []interface{}{ (*ModifyColumnFamiliesRequest_Modification_Create)(nil), (*ModifyColumnFamiliesRequest_Modification_Update)(nil), (*ModifyColumnFamiliesRequest_Modification_Drop)(nil), @@ -3525,7 +3907,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc, NumEnums: 0, - NumMessages: 31, + NumMessages: 35, NumExtensions: 0, NumServices: 1, }, @@ -3568,8 +3950,12 @@ type BigtableTableAdminClient interface { ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) // Gets metadata information about the specified table. GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*Table, error) + // Updates a specified table. + UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) // Permanently deletes a specified table and all of its data. DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Restores a specified table which was accidentally deleted. + UndeleteTable(ctx context.Context, in *UndeleteTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) // Performs a series of column family modifications on the specified table. // Either all or none of the modifications will occur before this method // returns, but data requests received prior to that point may see a table @@ -3703,6 +4089,15 @@ func (c *bigtableTableAdminClient) GetTable(ctx context.Context, in *GetTableReq return out, nil } +func (c *bigtableTableAdminClient) UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *bigtableTableAdminClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", in, out, opts...) @@ -3712,6 +4107,15 @@ func (c *bigtableTableAdminClient) DeleteTable(ctx context.Context, in *DeleteTa return out, nil } +func (c *bigtableTableAdminClient) UndeleteTable(ctx context.Context, in *UndeleteTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *bigtableTableAdminClient) ModifyColumnFamilies(ctx context.Context, in *ModifyColumnFamiliesRequest, opts ...grpc.CallOption) (*Table, error) { out := new(Table) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", in, out, opts...) 
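The hunks above extend `BigtableTableAdminClient` with `UpdateTable` and `UndeleteTable`, both returning a `google.longrunning.Operation`. Below is a minimal caller-side sketch, assuming the usual generated constructor `NewBigtableTableAdminClient` and an already-dialed `*grpc.ClientConn`; the `example` package, `updateThenUndelete`, `conn`, `tableName`, and the `Table.Name` field/field-mask contents are illustrative and not taken from this diff.

```go
package example

import (
	"context"
	"log"

	adminpb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

// updateThenUndelete sketches calls to the two RPCs added in this diff.
func updateThenUndelete(ctx context.Context, conn *grpc.ClientConn, tableName string) error {
	client := adminpb.NewBigtableTableAdminClient(conn)

	// UpdateTable takes the modified Table plus a FieldMask of the paths to
	// change and returns a long-running operation, mirroring the signature above.
	op, err := client.UpdateTable(ctx, &adminpb.UpdateTableRequest{
		Table:      &adminpb.Table{Name: tableName}, // plus whichever fields are being changed
		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{ /* paths of the fields set above */ }},
	})
	if err != nil {
		return err
	}
	log.Printf("UpdateTable started operation %s", op.GetName())

	// UndeleteTable also returns an Operation; its request fields are not shown
	// in this hunk, so the request is left empty here.
	if _, err := client.UndeleteTable(ctx, &adminpb.UndeleteTableRequest{}); err != nil {
		return err
	}
	return nil
}
```

Both RPCs hand back operations rather than final resources, so real callers would poll (or wrap) the returned `longrunning.Operation` before relying on the table's new state.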
@@ -3884,8 +4288,12 @@ type BigtableTableAdminServer interface { ListTables(context.Context, *ListTablesRequest) (*ListTablesResponse, error) // Gets metadata information about the specified table. GetTable(context.Context, *GetTableRequest) (*Table, error) + // Updates a specified table. + UpdateTable(context.Context, *UpdateTableRequest) (*longrunning.Operation, error) // Permanently deletes a specified table and all of its data. DeleteTable(context.Context, *DeleteTableRequest) (*emptypb.Empty, error) + // Restores a specified table which was accidentally deleted. + UndeleteTable(context.Context, *UndeleteTableRequest) (*longrunning.Operation, error) // Performs a series of column family modifications on the specified table. // Either all or none of the modifications will occur before this method // returns, but data requests received prior to that point may see a table @@ -3991,9 +4399,15 @@ func (*UnimplementedBigtableTableAdminServer) ListTables(context.Context, *ListT func (*UnimplementedBigtableTableAdminServer) GetTable(context.Context, *GetTableRequest) (*Table, error) { return nil, status.Errorf(codes.Unimplemented, "method GetTable not implemented") } +func (*UnimplementedBigtableTableAdminServer) UpdateTable(context.Context, *UpdateTableRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateTable not implemented") +} func (*UnimplementedBigtableTableAdminServer) DeleteTable(context.Context, *DeleteTableRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteTable not implemented") } +func (*UnimplementedBigtableTableAdminServer) UndeleteTable(context.Context, *UndeleteTableRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method UndeleteTable not implemented") +} func (*UnimplementedBigtableTableAdminServer) ModifyColumnFamilies(context.Context, *ModifyColumnFamiliesRequest) (*Table, error) { return nil, status.Errorf(codes.Unimplemented, "method ModifyColumnFamilies not implemented") } @@ -4122,6 +4536,24 @@ func _BigtableTableAdmin_GetTable_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } +func _BigtableTableAdmin_UpdateTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableAdminServer).UpdateTable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableAdminServer).UpdateTable(ctx, req.(*UpdateTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _BigtableTableAdmin_DeleteTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteTableRequest) if err := dec(in); err != nil { @@ -4140,6 +4572,24 @@ func _BigtableTableAdmin_DeleteTable_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _BigtableTableAdmin_UndeleteTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UndeleteTableRequest) + if err := dec(in); err != nil { + 
return nil, err + } + if interceptor == nil { + return srv.(BigtableTableAdminServer).UndeleteTable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableAdminServer).UndeleteTable(ctx, req.(*UndeleteTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _BigtableTableAdmin_ModifyColumnFamilies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ModifyColumnFamiliesRequest) if err := dec(in); err != nil { @@ -4466,10 +4916,18 @@ var _BigtableTableAdmin_serviceDesc = grpc.ServiceDesc{ MethodName: "GetTable", Handler: _BigtableTableAdmin_GetTable_Handler, }, + { + MethodName: "UpdateTable", + Handler: _BigtableTableAdmin_UpdateTable_Handler, + }, { MethodName: "DeleteTable", Handler: _BigtableTableAdmin_DeleteTable_Handler, }, + { + MethodName: "UndeleteTable", + Handler: _BigtableTableAdmin_UndeleteTable_Handler, + }, { MethodName: "ModifyColumnFamilies", Handler: _BigtableTableAdmin_ModifyColumnFamilies_Handler, diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go index eb0d56251b1f1..b42ac1d9e98cf 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go @@ -254,6 +254,8 @@ type Instance struct { // For instances created before this field was added (August 2021), this value // is `seconds: 0, nanos: 1`. CreateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. Reserved for future use. + SatisfiesPzs *bool `protobuf:"varint,8,opt,name=satisfies_pzs,json=satisfiesPzs,proto3,oneof" json:"satisfies_pzs,omitempty"` } func (x *Instance) Reset() { @@ -330,6 +332,13 @@ func (x *Instance) GetCreateTime() *timestamppb.Timestamp { return nil } +func (x *Instance) GetSatisfiesPzs() bool { + if x != nil && x.SatisfiesPzs != nil { + return *x.SatisfiesPzs + } + return false +} + // The Autoscaling targets for a Cluster. These determine the recommended nodes. type AutoscalingTargets struct { state protoimpl.MessageState @@ -341,6 +350,13 @@ type AutoscalingTargets struct { // 100 (total utilization), and is limited between 10 and 80, otherwise it // will return INVALID_ARGUMENT error. CpuUtilizationPercent int32 `protobuf:"varint,2,opt,name=cpu_utilization_percent,json=cpuUtilizationPercent,proto3" json:"cpu_utilization_percent,omitempty"` + // The storage utilization that the Autoscaler should be trying to achieve. + // This number is limited between 2560 (2.5TiB) and 5120 (5TiB) for a SSD + // cluster and between 8192 (8TiB) and 16384 (16TiB) for an HDD cluster; + // otherwise it will return INVALID_ARGUMENT error. If this value is set to 0, + // it will be treated as if it were set to the default value: 2560 for SSD, + // 8192 for HDD. 
+ StorageUtilizationGibPerNode int32 `protobuf:"varint,3,opt,name=storage_utilization_gib_per_node,json=storageUtilizationGibPerNode,proto3" json:"storage_utilization_gib_per_node,omitempty"` } func (x *AutoscalingTargets) Reset() { @@ -382,6 +398,13 @@ func (x *AutoscalingTargets) GetCpuUtilizationPercent() int32 { return 0 } +func (x *AutoscalingTargets) GetStorageUtilizationGibPerNode() int32 { + if x != nil { + return x.StorageUtilizationGibPerNode + } + return 0 +} + // Limits for the number of nodes a Cluster can autoscale up/down to. type AutoscalingLimits struct { state protoimpl.MessageState @@ -1101,7 +1124,7 @@ var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{ 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd3, 0x04, 0x0a, 0x08, 0x49, 0x6e, 0x73, 0x74, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x05, 0x0a, 0x08, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, @@ -1122,198 +1145,207 @@ var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x35, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, - 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, - 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x22, 0x3d, 0x0a, 0x04, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x50, 0x52, 0x4f, 0x44, 0x55, - 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x56, 0x45, 0x4c, - 0x4f, 0x50, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x3a, 0x53, 0xea, 0x41, 0x50, 0x0a, 0x25, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x22, 0x4c, 0x0a, - 0x12, 0x41, 0x75, 0x74, 0x6f, 0x73, 
0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x22, 0x6d, 0x0a, 0x11, 0x41, - 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, - 0x12, 0x2b, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x5f, 0x6e, 0x6f, - 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, - 0x6d, 0x69, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x2b, 0x0a, - 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x6d, 0x61, 0x78, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0xf7, 0x08, 0x0a, 0x07, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x08, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, - 0x05, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x42, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x5f, 0x6e, - 0x6f, 0x64, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, - 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x5c, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x12, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x64, - 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, + 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, + 0x50, 0x7a, 0x73, 0x88, 0x01, 0x01, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x35, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, + 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, + 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x22, 0x3d, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x56, 0x45, 0x4c, 0x4f, + 0x50, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x3a, 0x53, 0xea, 0x41, 0x50, 0x0a, 0x25, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, + 0x5f, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x22, 0x94, + 0x01, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x74, 0x69, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x46, 0x0a, + 0x20, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x67, 0x69, 0x62, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x64, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x1c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x69, 0x62, 0x50, 0x65, + 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x22, 0x6d, 0x0a, 0x11, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, + 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6d, 0x69, + 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 
0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x72, 0x76, 0x65, 0x4e, + 0x6f, 0x64, 0x65, 0x73, 0x22, 0xf7, 0x08, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x23, 0x0a, 0x21, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x14, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, + 0x03, 0xe0, 0x41, 0x05, 0x52, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x64, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x10, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xdf, + 0x01, 0x0a, 0x18, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, + 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5f, 0x0a, 0x12, 0x61, + 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x73, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x61, 0x75, 0x74, 0x6f, 0x73, + 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, + 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x45, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, - 0x41, 0x05, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xdf, 0x01, 0x0a, 0x18, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x5f, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, - 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x11, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, - 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x12, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, 0x89, 0x01, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x78, 0x0a, 0x1a, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x18, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x61, 0x75, + 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, + 0x1a, 0x89, 0x01, 0x0a, 0x0d, 0x43, 
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x78, 0x0a, 0x1a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, 0x75, + 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x1a, 0x5c, 0x0a, 0x10, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, - 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, - 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, - 0x22, 0x51, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, - 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, - 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x53, 0x49, 0x5a, - 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, - 0x44, 0x10, 0x04, 0x3a, 0x65, 0xea, 0x41, 0x62, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3a, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x22, 0x84, 0x05, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x82, 0x01, - 0x0a, 0x1d, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x75, 0x73, 0x65, 0x5f, 0x61, 0x6e, 0x79, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x4d, 0x75, 0x6c, 0x74, - 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 
0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, - 0x73, 0x65, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x19, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, - 0x6e, 0x79, 0x12, 0x71, 0x0a, 0x16, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, - 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, - 0x14, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x1a, 0x3c, 0x0a, 0x19, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, - 0x6e, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x49, 0x64, 0x73, 0x1a, 0x73, 0x0a, 0x14, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x73, 0x3a, 0x6f, 0xea, 0x41, 0x6c, 0x0a, 0x27, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x41, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, - 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x70, 0x70, - 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xd4, 0x03, 0x0a, 0x09, - 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, - 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x27, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 
0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x17, - 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x16, 0x6e, 0x6f, 0x64, 0x65, 0x5f, - 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x02, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x6e, 0x6f, - 0x64, 0x65, 0x43, 0x70, 0x75, 0x55, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x3a, 0x7f, 0xea, 0x41, 0x7c, 0x0a, 0x26, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x52, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x68, 0x6f, 0x74, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x7d, 0x42, 0xd0, 0x02, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, - 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, - 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, - 0x6c, 0x6f, 
0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, - 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, - 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, - 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x67, 0x52, 0x18, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, + 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x5c, 0x0a, 0x10, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x48, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a, + 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x51, 0x0a, 0x05, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, + 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, + 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, + 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x53, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x3a, 0x65, 0xea, + 0x41, 0x62, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x7d, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x84, + 0x05, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x82, 0x01, 0x0a, 0x1d, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x5f, 0x75, 0x73, 0x65, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x48, + 0x00, 0x52, 0x19, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x71, 0x0a, 0x16, + 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x14, 0x73, 0x69, 0x6e, 0x67, 0x6c, + 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x1a, + 0x3c, 0x0a, 0x19, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x1f, 0x0a, 0x0b, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x1a, 0x73, 0x0a, + 0x14, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x73, 0x3a, 0x6f, 0xea, 0x41, 0x6c, 0x0a, 0x27, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x12, 0x41, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xd4, 0x03, 0x0a, 0x09, 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xfa, 0x41, 0x24, + 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 
0x69, 0x6e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, + 0x79, 0x12, 0x38, 0x0a, 0x16, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, + 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x02, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x70, 0x75, 0x55, + 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x3a, 0x7f, 0xea, 0x41, 0x7c, + 0x0a, 0x26, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x48, + 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x52, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x2f, + 0x7b, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x7d, 0x42, 0xd0, 0x02, 0x0a, + 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x49, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, + 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, + 0x02, 
0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, + 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, + 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, + 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1512,6 +1544,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { } } } + file_google_bigtable_admin_v2_instance_proto_msgTypes[0].OneofWrappers = []interface{}{} file_google_bigtable_admin_v2_instance_proto_msgTypes[3].OneofWrappers = []interface{}{ (*Cluster_ClusterConfig_)(nil), } diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go index d09551bf200f3..edd18c4f43d8c 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go @@ -459,6 +459,7 @@ type RestoreInfo struct { // Information about the source used to restore the table. // // Types that are assignable to SourceInfo: + // // *RestoreInfo_BackupInfo SourceInfo isRestoreInfo_SourceInfo `protobuf_oneof:"source_info"` } @@ -556,6 +557,14 @@ type Table struct { // Output only. If this table was restored from another data source (e.g. a backup), this // field will be populated with information about the restore. RestoreInfo *RestoreInfo `protobuf:"bytes,6,opt,name=restore_info,json=restoreInfo,proto3" json:"restore_info,omitempty"` + // Set to true to make the table protected against data loss. i.e. deleting + // the following resources through Admin APIs are prohibited: + // - The table. + // - The column families in the table. + // - The instance containing the table. + // + // Note one can still delete the data stored in the table through Data APIs. + DeletionProtection bool `protobuf:"varint,9,opt,name=deletion_protection,json=deletionProtection,proto3" json:"deletion_protection,omitempty"` } func (x *Table) Reset() { @@ -625,6 +634,13 @@ func (x *Table) GetRestoreInfo() *RestoreInfo { return nil } +func (x *Table) GetDeletionProtection() bool { + if x != nil { + return x.DeletionProtection + } + return false +} + // A set of columns within a table which share a common configuration. type ColumnFamily struct { state protoimpl.MessageState @@ -688,6 +704,7 @@ type GcRule struct { // Garbage collection rules. // // Types that are assignable to Rule: + // // *GcRule_MaxNumVersions // *GcRule_MaxAge // *GcRule_Intersection_ @@ -993,7 +1010,9 @@ type Backup struct { // A globally unique identifier for the backup which cannot be // changed. 
Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}/ - // backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*` + // + // backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*` + // // The final segment of the name must be between 1 and 50 characters // in length. // @@ -1380,7 +1399,7 @@ var file_google_bigtable_admin_v2_table_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x0d, 0x0a, - 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0xea, 0x09, 0x0a, + 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x9b, 0x0a, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, @@ -1405,231 +1424,234 @@ var file_google_bigtable_admin_v2_table_proto_rawDesc = []byte{ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0xe8, 0x02, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x6f, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x56, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, - 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x8e, - 0x01, 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, - 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4e, 0x49, 0x54, - 0x49, 0x41, 0x4c, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x4c, - 0x41, 0x4e, 0x4e, 0x45, 0x44, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, - 0x45, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x55, 0x4e, 0x50, 0x4c, 0x41, 0x4e, 0x4e, 0x45, 0x44, - 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x12, 
0x09, - 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x41, - 0x44, 0x59, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4d, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x1a, - 0x6e, 0x0a, 0x12, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x42, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x69, 0x0a, 0x13, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xe8, 0x02, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x6f, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x56, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, + 0x8e, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, + 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4e, 0x49, + 0x54, 0x49, 0x41, 0x4c, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x50, + 0x4c, 0x41, 0x4e, 0x4e, 0x45, 0x44, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, + 0x43, 0x45, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x55, 0x4e, 0x50, 0x4c, 0x41, 0x4e, 0x4e, 0x45, + 0x44, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x12, + 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, + 0x41, 0x44, 0x59, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4d, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x05, + 0x1a, 0x6e, 0x0a, 
0x12, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x42, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x14, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, - 0x74, 0x79, 0x12, 0x25, 0x0a, 0x21, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x5f, - 0x47, 0x52, 0x41, 0x4e, 0x55, 0x4c, 0x41, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x49, 0x4c, - 0x4c, 0x49, 0x53, 0x10, 0x01, 0x22, 0x71, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, 0x12, 0x14, 0x0a, - 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, - 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x56, 0x49, 0x45, - 0x57, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x4e, 0x43, - 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x05, 0x12, 0x08, - 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x3a, 0x5f, 0xea, 0x41, 0x5c, 0x0a, 0x22, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x12, 0x36, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x7d, 0x22, 0x49, 0x0a, 0x0c, 0x43, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x39, 0x0a, 0x07, 0x67, 0x63, 0x5f, - 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x06, 0x67, 0x63, - 0x52, 0x75, 0x6c, 0x65, 0x22, 0x90, 0x03, 0x0a, 0x06, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x12, - 0x2a, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0e, 0x6d, 0x61, 0x78, - 0x4e, 0x75, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x6d, - 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x41, 0x67, - 0x65, 0x12, 0x53, 0x0a, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x73, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, - 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x1a, 0x46, 0x0a, 0x0c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x73, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x1a, 0x3f, - 0x0a, 0x05, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, + 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x69, 0x0a, 0x13, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x14, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x25, 0x0a, 0x21, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, + 0x5f, 0x47, 0x52, 0x41, 0x4e, 0x55, 0x4c, 0x41, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x49, + 0x4c, 0x4c, 0x49, 0x53, 0x10, 0x01, 0x22, 0x71, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, 0x12, 0x14, + 0x0a, 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x56, 0x49, + 0x45, 0x57, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x4e, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 
0x57, 0x10, 0x05, 0x12, + 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x3a, 0x5f, 0xea, 0x41, 0x5c, 0x0a, 0x22, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x12, 0x36, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x7d, 0x22, 0x49, 0x0a, 0x0c, 0x43, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x39, 0x0a, 0x07, 0x67, 0x63, + 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x06, 0x67, + 0x63, 0x52, 0x75, 0x6c, 0x65, 0x22, 0x90, 0x03, 0x0a, 0x06, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, + 0x12, 0x2a, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0e, 0x6d, 0x61, + 0x78, 0x4e, 0x75, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34, 0x0a, 0x07, + 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x41, + 0x67, 0x65, 0x12, 0x53, 0x0a, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x48, 0x00, + 0x52, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x1a, 0x46, 0x0a, 0x0c, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x42, - 0x06, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x22, 0x8a, 0x03, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x65, 0x0a, 0x0f, 0x65, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 
0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x44, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x58, 0x0a, 0x0f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x30, 0xe0, 0x41, 0x03, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, - 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x71, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x44, - 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, - 0x4e, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, - 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, - 0x4f, 0x4e, 0x10, 0x02, 0x22, 0x9a, 0x04, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, + 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x1a, + 0x3f, 0x0a, 0x05, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, + 0x42, 0x06, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x22, 0x8a, 0x03, 0x0a, 0x0e, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x65, 0x0a, 0x0f, 0x65, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, + 0x6e, 0x63, 0x72, 0x79, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x44, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x58, 0x0a, 0x0f, 0x6b, 0x6d, 0x73, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x30, 0xe0, 0x41, 0x03, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x71, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, + 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, + 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, + 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, + 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x22, 0x9a, 0x04, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x0b, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 
0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x61, 0x74, - 0x61, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, - 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x05, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, 0x0a, - 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, - 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x52, - 0x45, 0x41, 0x44, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, - 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x12, 0x4f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x7d, 0x22, 0xf4, 0x04, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x29, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x05, 0xe0, 0x41, 0x02, 0x52, 0x0b, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, 0x0a, - 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, + 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, + 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, + 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, + 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x12, 0x4f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x7d, 0x22, 0xf4, 0x04, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x29, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x05, 0xe0, 0x41, 0x02, 0x52, + 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, - 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x73, 0x69, 0x7a, - 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x41, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 
0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x56, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, - 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, - 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, - 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, - 0x02, 0x3a, 0x75, 0xea, 0x41, 0x72, 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x4b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, - 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x7d, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2a, 0x44, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, - 0x1f, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x10, 0x01, 0x42, 0xfc, - 0x02, 0x0a, 0x1c, 0x63, 
0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, - 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, - 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, - 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, - 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, - 0x3a, 0x56, 0x32, 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, - 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x7a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, - 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, - 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x41, 0x02, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, + 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, + 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x73, 0x69, + 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x41, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x56, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, + 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, + 0x10, 0x02, 0x3a, 0x75, 0xea, 0x41, 0x72, 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x4b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, + 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x7d, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2a, 0x44, 0x0a, 0x11, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, + 0x0a, 0x1f, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 
0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x10, 0x01, 0x42, + 0xfc, 0x02, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x42, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, + 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, + 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, + 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, + 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x7a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go index b4b10b9c82eee..e0664f87033e8 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go @@ -42,6 +42,63 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// The desired view into RequestStats that should be returned in the response. +// +// See also: RequestStats message. +type ReadRowsRequest_RequestStatsView int32 + +const ( + // The default / unset value. The API will default to the NONE option below. + ReadRowsRequest_REQUEST_STATS_VIEW_UNSPECIFIED ReadRowsRequest_RequestStatsView = 0 + // Do not include any RequestStats in the response. This will leave the + // RequestStats embedded message unset in the response. + ReadRowsRequest_REQUEST_STATS_NONE ReadRowsRequest_RequestStatsView = 1 + // Include the full set of available RequestStats in the response, + // applicable to this read. 
+ ReadRowsRequest_REQUEST_STATS_FULL ReadRowsRequest_RequestStatsView = 2 +) + +// Enum value maps for ReadRowsRequest_RequestStatsView. +var ( + ReadRowsRequest_RequestStatsView_name = map[int32]string{ + 0: "REQUEST_STATS_VIEW_UNSPECIFIED", + 1: "REQUEST_STATS_NONE", + 2: "REQUEST_STATS_FULL", + } + ReadRowsRequest_RequestStatsView_value = map[string]int32{ + "REQUEST_STATS_VIEW_UNSPECIFIED": 0, + "REQUEST_STATS_NONE": 1, + "REQUEST_STATS_FULL": 2, + } +) + +func (x ReadRowsRequest_RequestStatsView) Enum() *ReadRowsRequest_RequestStatsView { + p := new(ReadRowsRequest_RequestStatsView) + *p = x + return p +} + +func (x ReadRowsRequest_RequestStatsView) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ReadRowsRequest_RequestStatsView) Descriptor() protoreflect.EnumDescriptor { + return file_google_bigtable_v2_bigtable_proto_enumTypes[0].Descriptor() +} + +func (ReadRowsRequest_RequestStatsView) Type() protoreflect.EnumType { + return &file_google_bigtable_v2_bigtable_proto_enumTypes[0] +} + +func (x ReadRowsRequest_RequestStatsView) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ReadRowsRequest_RequestStatsView.Descriptor instead. +func (ReadRowsRequest_RequestStatsView) EnumDescriptor() ([]byte, []int) { + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{0, 0} +} + // Request message for Bigtable.ReadRows. type ReadRowsRequest struct { state protoimpl.MessageState @@ -52,8 +109,8 @@ type ReadRowsRequest struct { // Values are of the form // `projects//instances//tables/
`. TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. + // This value specifies routing for replication. This API only accepts the + // empty value of app_profile_id. AppProfileId string `protobuf:"bytes,5,opt,name=app_profile_id,json=appProfileId,proto3" json:"app_profile_id,omitempty"` // The row keys and/or ranges to read sequentially. If not specified, reads // from all rows. @@ -64,6 +121,8 @@ type ReadRowsRequest struct { // The read will stop after committing to N rows' worth of results. The // default (zero) is to return all results. RowsLimit int64 `protobuf:"varint,4,opt,name=rows_limit,json=rowsLimit,proto3" json:"rows_limit,omitempty"` + // The view into RequestStats, as described above. + RequestStatsView ReadRowsRequest_RequestStatsView `protobuf:"varint,6,opt,name=request_stats_view,json=requestStatsView,proto3,enum=google.bigtable.v2.ReadRowsRequest_RequestStatsView" json:"request_stats_view,omitempty"` } func (x *ReadRowsRequest) Reset() { @@ -133,6 +192,13 @@ func (x *ReadRowsRequest) GetRowsLimit() int64 { return 0 } +func (x *ReadRowsRequest) GetRequestStatsView() ReadRowsRequest_RequestStatsView { + if x != nil { + return x.RequestStatsView + } + return ReadRowsRequest_REQUEST_STATS_VIEW_UNSPECIFIED +} + // Response message for Bigtable.ReadRows. type ReadRowsResponse struct { state protoimpl.MessageState @@ -149,6 +215,28 @@ type ReadRowsResponse struct { // lot of data that was filtered out since the last committed row // key, allowing the client to skip that work on a retry. LastScannedRowKey []byte `protobuf:"bytes,2,opt,name=last_scanned_row_key,json=lastScannedRowKey,proto3" json:"last_scanned_row_key,omitempty"` + // If requested, provide enhanced query performance statistics. The semantics + // dictate: + // - request_stats is empty on every (streamed) response, except + // - request_stats has non-empty information after all chunks have been + // streamed, where the ReadRowsResponse message only contains + // request_stats. + // - For example, if a read request would have returned an empty + // response instead a single ReadRowsResponse is streamed with empty + // chunks and request_stats filled. + // + // Visually, response messages will stream as follows: + // + // ... -> {chunks: [...]} -> {chunks: [], request_stats: {...}} + // \______________________/ \________________________________/ + // Primary response Trailer of RequestStats info + // + // Or if the read did not return any values: + // + // {chunks: [], request_stats: {...}} + // \________________________________/ + // Trailer of RequestStats info + RequestStats *RequestStats `protobuf:"bytes,3,opt,name=request_stats,json=requestStats,proto3" json:"request_stats,omitempty"` } func (x *ReadRowsResponse) Reset() { @@ -197,6 +285,13 @@ func (x *ReadRowsResponse) GetLastScannedRowKey() []byte { return nil } +func (x *ReadRowsResponse) GetRequestStats() *RequestStats { + if x != nil { + return x.RequestStats + } + return nil +} + // Request message for Bigtable.SampleRowKeys. type SampleRowKeysRequest struct { state protoimpl.MessageState @@ -1007,6 +1102,7 @@ type ReadRowsResponse_CellChunk struct { // Signals to the client concerning previous CellChunks received. 
// // Types that are assignable to RowStatus: + // // *ReadRowsResponse_CellChunk_ResetRow // *ReadRowsResponse_CellChunk_CommitRow RowStatus isReadRowsResponse_CellChunk_RowStatus `protobuf_oneof:"row_status"` @@ -1274,73 +1370,177 @@ var file_google_bigtable_v2_bigtable_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x88, 0x02, 0x0a, - 0x0f, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd4, 0x03, 0x0a, 0x0f, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, + 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x72, 0x6f, 0x77, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, + 0x53, 0x65, 0x74, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x35, 0x0a, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, + 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x6f, 0x77, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x62, 0x0a, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, + 0x5f, 0x76, 0x69, 0x65, 0x77, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x56, 0x69, 0x65, + 0x77, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x56, + 0x69, 0x65, 0x77, 0x22, 0x66, 0x0a, 0x10, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x56, 0x69, 0x65, 0x77, 0x12, 0x22, 0x0a, 0x1e, 0x52, 0x45, 0x51, 0x55, 0x45, + 0x53, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x53, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x52, + 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x53, 0x5f, 0x4e, 0x4f, 0x4e, + 0x45, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x53, 0x5f, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x02, 0x22, 0xb9, 0x04, 0x0a, 0x10, + 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x46, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, + 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x63, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x0d, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x1a, 0xe4, 0x02, 0x0a, 0x09, 0x43, 0x65, 0x6c, 0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x17, + 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x0b, 0x66, 0x61, 0x6d, 0x69, 0x6c, + 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x66, 0x61, 0x6d, 0x69, + 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x09, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, + 0x69, 0x63, 0x72, 0x6f, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 
0x63, 0x72, 0x6f, 0x73, 0x12, 0x16, 0x0a, 0x06, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x72, 0x65, 0x73, + 0x65, 0x74, 0x5f, 0x72, 0x6f, 0x77, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x65, 0x74, 0x52, 0x6f, 0x77, 0x12, 0x1f, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x5f, 0x72, 0x6f, 0x77, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, + 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x6f, 0x77, 0x42, 0x0c, 0x0a, 0x0a, 0x72, 0x6f, 0x77, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x53, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, - 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, + 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, - 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x53, 0x65, 0x74, 0x52, 0x04, 0x72, 0x6f, 0x77, - 0x73, 0x12, 0x35, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x6f, 0x77, 0x73, - 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x6f, - 0x77, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0xf2, 0x03, 0x0a, 0x10, 0x52, 0x65, 0x61, 0x64, - 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x06, - 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x63, 0x61, - 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 
0x64, 0x52, - 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x1a, 0xe4, 0x02, 0x0a, 0x09, 0x43, 0x65, 0x6c, 0x6c, 0x43, 0x68, - 0x75, 0x6e, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x0b, - 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x09, 0x71, - 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x71, 0x75, 0x61, - 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x63, 0x72, 0x6f, - 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, - 0x0a, 0x09, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x72, 0x6f, 0x77, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x08, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x74, 0x52, 0x6f, 0x77, 0x12, 0x1f, 0x0a, - 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x72, 0x6f, 0x77, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x08, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x6f, 0x77, 0x42, 0x0c, - 0x0a, 0x0a, 0x72, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x87, 0x01, 0x0a, - 0x14, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x22, 0x53, 0x0a, 0x15, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x17, 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x73, - 0x65, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, - 0x6f, 0x66, 
0x66, 0x73, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe2, 0x01, 0x0a, 0x10, - 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x64, 0x22, 0x53, 0x0a, 0x15, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, + 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x72, 0x6f, + 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x6f, 0x77, + 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe2, 0x01, 0x0a, 0x10, 0x4d, 0x75, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x07, + 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x09, 0x6d, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x13, 0x0a, 0x11, 0x4d, + 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0xb3, 0x02, 0x0a, 0x11, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x4a, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x1a, 0x61, 0x0a, 
0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, + 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, + 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x6d, 0x75, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x12, 0x4d, 0x75, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, + 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, + 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x22, 0xff, 0x02, 0x0a, 0x18, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, + 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, + 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1c, + 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x48, 0x0a, 0x10, + 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0e, 0x74, 0x72, 0x75, 0x65, 0x5f, 0x6d, + 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x74, 0x72, + 0x75, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x12, 0x45, 0x0a, 0x0f, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x48, 0x0a, 0x19, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2b, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x22, 0x7d, 0x0a, 0x12, + 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, + 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x50, + 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0xee, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, + 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, @@ -1350,286 +1550,202 @@ var file_google_bigtable_v2_bigtable_proto_rawDesc = []byte{ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, - 0x3f, 0x0a, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0x13, 0x0a, 0x11, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb3, 0x02, 0x0a, 0x11, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x61, 
0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x4a, 0x0a, 0x07, - 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x61, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x17, 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x09, 0x6d, 0x75, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x12, - 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x05, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xff, 0x02, 0x0a, 0x18, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, - 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, - 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 
0xe0, 0x41, 0x02, 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, - 0x79, 0x12, 0x48, 0x0a, 0x10, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0f, 0x70, 0x72, 0x65, 0x64, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0e, 0x74, - 0x72, 0x75, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x0d, 0x74, 0x72, 0x75, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x45, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, - 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x4d, 0x75, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x48, 0x0a, 0x19, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x10, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x64, 0x22, 0x7d, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, - 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, - 0x22, 0x15, 0x0a, 0x13, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xee, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x61, 0x64, - 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 
0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x72, 0x6f, - 0x77, 0x4b, 0x65, 0x79, 0x12, 0x42, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, - 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x47, 0x0a, 0x1a, 0x52, 0x65, 0x61, 0x64, - 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x03, 0x72, 0x6f, 0x77, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x03, 0x72, 0x6f, - 0x77, 0x32, 0xb0, 0x14, 0x0a, 0x08, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x9b, - 0x02, 0x0a, 0x08, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x23, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc1, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x22, - 0x39, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, - 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, - 0x64, 0xda, 0x41, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, - 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, - 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x30, 0x01, 0x12, 0xac, 0x02, 0x0a, - 0x0d, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x28, + 0x42, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 
0x53, 0x61, - 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0xc3, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x12, 0x3e, 0x2f, 0x76, - 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, - 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, - 0xda, 0x41, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x19, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x30, 0x01, 0x12, 0xc1, 0x02, 0x0a, 0x09, - 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, - 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe6, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3f, 0x22, - 0x3a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, - 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x69, 0x64, 0xda, 0x41, 0x1c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, - 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0xda, 0x41, 0x2b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, - 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, - 0xb3, 0x02, 0x0a, 0x0a, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x25, + 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, + 0x69, 0x74, 
0x65, 0x52, 0x75, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x72, 0x75, + 0x6c, 0x65, 0x73, 0x22, 0x47, 0x0a, 0x1a, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, + 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x29, 0x0a, 0x03, 0x72, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd3, 0x01, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x6f, 0x77, 0x73, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, - 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x12, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0xda, - 0x41, 0x21, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x65, 0x6e, 0x74, - 0x72, 0x69, 0x65, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x5f, 0x69, 0x64, 0x30, 0x01, 0x12, 0xad, 0x03, 0x0a, 0x11, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, - 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x2c, 0x2e, 0x67, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x03, 0x72, 0x6f, 0x77, 0x32, 0xb0, 0x14, 0x0a, + 0x08, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x9b, 0x02, 0x0a, 0x08, 0x52, 0x65, + 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xba, 0x02, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x47, 0x22, 0x42, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 
0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x2f, 0x2a, 0x7d, 0x3a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, + 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0xc1, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x22, 0x39, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, + 0x64, 0x52, 0x6f, 0x77, 0x73, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, - 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x42, 0x74, + 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x0a, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x19, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x30, 0x01, 0x12, 0xac, 0x02, 0x0a, 0x0d, 0x53, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, + 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc3, + 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x12, 0x3e, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, + 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, + 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x0a, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 
0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x69, 0x64, 0x30, 0x01, 0x12, 0xc1, 0x02, 0x0a, 0x09, 0x4d, 0x75, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x6f, 0x77, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0xe6, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3f, 0x22, 0x3a, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x75, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, + 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, + 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x1c, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, + 0x65, 0x79, 0x2c, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41, 0x2b, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, + 0x79, 0x2c, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, + 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0xb3, 0x02, 0x0a, 0x0a, 0x4d, + 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, + 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd3, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x40, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x3a, 0x01, + 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, + 
0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x2c, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0xda, 0x41, 0x21, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x2c, + 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x30, 0x01, + 0x12, 0xad, 0x03, 0x0a, 0x11, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, + 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0xba, 0x02, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x22, 0x42, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, + 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x42, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x70, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x74, 0x72, + 0x75, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41, 0x51, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x74, 0x72, 0x75, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0xda, 0x41, 0x51, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, - 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x74, 0x72, 0x75, 0x65, 0x5f, 0x6d, 0x75, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0xee, 0x01, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, - 0x64, 0x57, 0x61, 0x72, 0x6d, 
0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x41, - 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8d, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x22, - 0x26, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, + 0x12, 0xee, 0x01, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, + 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, + 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x69, + 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x8d, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x22, 0x26, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70, 0x69, + 0x6e, 0x67, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x39, 0x12, 0x25, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x70, 0x69, 0x6e, 0x67, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x39, - 0x12, 0x25, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0xda, 0x41, 0x13, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0xdd, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4d, - 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x2d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, - 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe7, 0x01, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x48, 0x22, 0x43, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 
0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, - 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, - 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, - 0x64, 0xda, 0x41, 0x18, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, - 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x72, 0x75, 0x6c, 0x65, 0x73, 0xda, 0x41, 0x27, 0x74, + 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x13, 0x6e, 0x61, + 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, + 0x64, 0x12, 0xdd, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, + 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe7, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x48, + 0x22, 0x43, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, + 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, + 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x18, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, - 0x79, 0x2c, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x1a, 0xdb, 0x02, 0xca, 0x41, 0x17, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xbd, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, - 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 
0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x64, 0x61, 0x74, 0x61, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, - 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2c, + 0x79, 0x2c, 0x72, 0x75, 0x6c, 0x65, 0x73, 0xda, 0x41, 0x27, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x72, 0x75, 0x6c, + 0x65, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, + 0x64, 0x1a, 0xdb, 0x02, 0xca, 0x41, 0x17, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, + 0xbd, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, + 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, - 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, - 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xeb, 0x02, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, - 0x0d, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2f, 0x76, 0x32, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, - 0x56, 
0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, - 0xea, 0x41, 0x5c, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x36, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, - 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x7d, 0xea, - 0x41, 0x50, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x72, 0x65, + 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, + 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, + 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x72, + 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, + 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, + 0xeb, 0x02, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x42, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 
0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0x50, 0x0a, 0x25, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0xea, 0x41, + 0x5c, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x36, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x7d, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1644,69 +1760,74 @@ func file_google_bigtable_v2_bigtable_proto_rawDescGZIP() []byte { return file_google_bigtable_v2_bigtable_proto_rawDescData } +var file_google_bigtable_v2_bigtable_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_bigtable_v2_bigtable_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_google_bigtable_v2_bigtable_proto_goTypes = []interface{}{ - (*ReadRowsRequest)(nil), // 0: google.bigtable.v2.ReadRowsRequest - (*ReadRowsResponse)(nil), // 1: google.bigtable.v2.ReadRowsResponse - (*SampleRowKeysRequest)(nil), // 2: google.bigtable.v2.SampleRowKeysRequest - (*SampleRowKeysResponse)(nil), // 3: google.bigtable.v2.SampleRowKeysResponse - (*MutateRowRequest)(nil), // 4: google.bigtable.v2.MutateRowRequest - (*MutateRowResponse)(nil), // 5: google.bigtable.v2.MutateRowResponse - (*MutateRowsRequest)(nil), // 6: google.bigtable.v2.MutateRowsRequest - (*MutateRowsResponse)(nil), // 7: google.bigtable.v2.MutateRowsResponse - (*CheckAndMutateRowRequest)(nil), // 8: google.bigtable.v2.CheckAndMutateRowRequest - (*CheckAndMutateRowResponse)(nil), // 9: google.bigtable.v2.CheckAndMutateRowResponse - (*PingAndWarmRequest)(nil), // 10: google.bigtable.v2.PingAndWarmRequest - (*PingAndWarmResponse)(nil), // 11: google.bigtable.v2.PingAndWarmResponse - (*ReadModifyWriteRowRequest)(nil), // 12: google.bigtable.v2.ReadModifyWriteRowRequest - (*ReadModifyWriteRowResponse)(nil), // 13: google.bigtable.v2.ReadModifyWriteRowResponse - (*ReadRowsResponse_CellChunk)(nil), // 14: google.bigtable.v2.ReadRowsResponse.CellChunk - (*MutateRowsRequest_Entry)(nil), // 15: google.bigtable.v2.MutateRowsRequest.Entry - (*MutateRowsResponse_Entry)(nil), // 16: google.bigtable.v2.MutateRowsResponse.Entry - (*RowSet)(nil), // 17: google.bigtable.v2.RowSet - (*RowFilter)(nil), // 18: google.bigtable.v2.RowFilter - (*Mutation)(nil), // 19: google.bigtable.v2.Mutation - (*ReadModifyWriteRule)(nil), // 20: google.bigtable.v2.ReadModifyWriteRule - (*Row)(nil), // 21: 
google.bigtable.v2.Row - (*wrapperspb.StringValue)(nil), // 22: google.protobuf.StringValue - (*wrapperspb.BytesValue)(nil), // 23: google.protobuf.BytesValue - (*status.Status)(nil), // 24: google.rpc.Status + (ReadRowsRequest_RequestStatsView)(0), // 0: google.bigtable.v2.ReadRowsRequest.RequestStatsView + (*ReadRowsRequest)(nil), // 1: google.bigtable.v2.ReadRowsRequest + (*ReadRowsResponse)(nil), // 2: google.bigtable.v2.ReadRowsResponse + (*SampleRowKeysRequest)(nil), // 3: google.bigtable.v2.SampleRowKeysRequest + (*SampleRowKeysResponse)(nil), // 4: google.bigtable.v2.SampleRowKeysResponse + (*MutateRowRequest)(nil), // 5: google.bigtable.v2.MutateRowRequest + (*MutateRowResponse)(nil), // 6: google.bigtable.v2.MutateRowResponse + (*MutateRowsRequest)(nil), // 7: google.bigtable.v2.MutateRowsRequest + (*MutateRowsResponse)(nil), // 8: google.bigtable.v2.MutateRowsResponse + (*CheckAndMutateRowRequest)(nil), // 9: google.bigtable.v2.CheckAndMutateRowRequest + (*CheckAndMutateRowResponse)(nil), // 10: google.bigtable.v2.CheckAndMutateRowResponse + (*PingAndWarmRequest)(nil), // 11: google.bigtable.v2.PingAndWarmRequest + (*PingAndWarmResponse)(nil), // 12: google.bigtable.v2.PingAndWarmResponse + (*ReadModifyWriteRowRequest)(nil), // 13: google.bigtable.v2.ReadModifyWriteRowRequest + (*ReadModifyWriteRowResponse)(nil), // 14: google.bigtable.v2.ReadModifyWriteRowResponse + (*ReadRowsResponse_CellChunk)(nil), // 15: google.bigtable.v2.ReadRowsResponse.CellChunk + (*MutateRowsRequest_Entry)(nil), // 16: google.bigtable.v2.MutateRowsRequest.Entry + (*MutateRowsResponse_Entry)(nil), // 17: google.bigtable.v2.MutateRowsResponse.Entry + (*RowSet)(nil), // 18: google.bigtable.v2.RowSet + (*RowFilter)(nil), // 19: google.bigtable.v2.RowFilter + (*RequestStats)(nil), // 20: google.bigtable.v2.RequestStats + (*Mutation)(nil), // 21: google.bigtable.v2.Mutation + (*ReadModifyWriteRule)(nil), // 22: google.bigtable.v2.ReadModifyWriteRule + (*Row)(nil), // 23: google.bigtable.v2.Row + (*wrapperspb.StringValue)(nil), // 24: google.protobuf.StringValue + (*wrapperspb.BytesValue)(nil), // 25: google.protobuf.BytesValue + (*status.Status)(nil), // 26: google.rpc.Status } var file_google_bigtable_v2_bigtable_proto_depIdxs = []int32{ - 17, // 0: google.bigtable.v2.ReadRowsRequest.rows:type_name -> google.bigtable.v2.RowSet - 18, // 1: google.bigtable.v2.ReadRowsRequest.filter:type_name -> google.bigtable.v2.RowFilter - 14, // 2: google.bigtable.v2.ReadRowsResponse.chunks:type_name -> google.bigtable.v2.ReadRowsResponse.CellChunk - 19, // 3: google.bigtable.v2.MutateRowRequest.mutations:type_name -> google.bigtable.v2.Mutation - 15, // 4: google.bigtable.v2.MutateRowsRequest.entries:type_name -> google.bigtable.v2.MutateRowsRequest.Entry - 16, // 5: google.bigtable.v2.MutateRowsResponse.entries:type_name -> google.bigtable.v2.MutateRowsResponse.Entry - 18, // 6: google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter:type_name -> google.bigtable.v2.RowFilter - 19, // 7: google.bigtable.v2.CheckAndMutateRowRequest.true_mutations:type_name -> google.bigtable.v2.Mutation - 19, // 8: google.bigtable.v2.CheckAndMutateRowRequest.false_mutations:type_name -> google.bigtable.v2.Mutation - 20, // 9: google.bigtable.v2.ReadModifyWriteRowRequest.rules:type_name -> google.bigtable.v2.ReadModifyWriteRule - 21, // 10: google.bigtable.v2.ReadModifyWriteRowResponse.row:type_name -> google.bigtable.v2.Row - 22, // 11: google.bigtable.v2.ReadRowsResponse.CellChunk.family_name:type_name -> google.protobuf.StringValue 
- 23, // 12: google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier:type_name -> google.protobuf.BytesValue - 19, // 13: google.bigtable.v2.MutateRowsRequest.Entry.mutations:type_name -> google.bigtable.v2.Mutation - 24, // 14: google.bigtable.v2.MutateRowsResponse.Entry.status:type_name -> google.rpc.Status - 0, // 15: google.bigtable.v2.Bigtable.ReadRows:input_type -> google.bigtable.v2.ReadRowsRequest - 2, // 16: google.bigtable.v2.Bigtable.SampleRowKeys:input_type -> google.bigtable.v2.SampleRowKeysRequest - 4, // 17: google.bigtable.v2.Bigtable.MutateRow:input_type -> google.bigtable.v2.MutateRowRequest - 6, // 18: google.bigtable.v2.Bigtable.MutateRows:input_type -> google.bigtable.v2.MutateRowsRequest - 8, // 19: google.bigtable.v2.Bigtable.CheckAndMutateRow:input_type -> google.bigtable.v2.CheckAndMutateRowRequest - 10, // 20: google.bigtable.v2.Bigtable.PingAndWarm:input_type -> google.bigtable.v2.PingAndWarmRequest - 12, // 21: google.bigtable.v2.Bigtable.ReadModifyWriteRow:input_type -> google.bigtable.v2.ReadModifyWriteRowRequest - 1, // 22: google.bigtable.v2.Bigtable.ReadRows:output_type -> google.bigtable.v2.ReadRowsResponse - 3, // 23: google.bigtable.v2.Bigtable.SampleRowKeys:output_type -> google.bigtable.v2.SampleRowKeysResponse - 5, // 24: google.bigtable.v2.Bigtable.MutateRow:output_type -> google.bigtable.v2.MutateRowResponse - 7, // 25: google.bigtable.v2.Bigtable.MutateRows:output_type -> google.bigtable.v2.MutateRowsResponse - 9, // 26: google.bigtable.v2.Bigtable.CheckAndMutateRow:output_type -> google.bigtable.v2.CheckAndMutateRowResponse - 11, // 27: google.bigtable.v2.Bigtable.PingAndWarm:output_type -> google.bigtable.v2.PingAndWarmResponse - 13, // 28: google.bigtable.v2.Bigtable.ReadModifyWriteRow:output_type -> google.bigtable.v2.ReadModifyWriteRowResponse - 22, // [22:29] is the sub-list for method output_type - 15, // [15:22] is the sub-list for method input_type - 15, // [15:15] is the sub-list for extension type_name - 15, // [15:15] is the sub-list for extension extendee - 0, // [0:15] is the sub-list for field type_name + 18, // 0: google.bigtable.v2.ReadRowsRequest.rows:type_name -> google.bigtable.v2.RowSet + 19, // 1: google.bigtable.v2.ReadRowsRequest.filter:type_name -> google.bigtable.v2.RowFilter + 0, // 2: google.bigtable.v2.ReadRowsRequest.request_stats_view:type_name -> google.bigtable.v2.ReadRowsRequest.RequestStatsView + 15, // 3: google.bigtable.v2.ReadRowsResponse.chunks:type_name -> google.bigtable.v2.ReadRowsResponse.CellChunk + 20, // 4: google.bigtable.v2.ReadRowsResponse.request_stats:type_name -> google.bigtable.v2.RequestStats + 21, // 5: google.bigtable.v2.MutateRowRequest.mutations:type_name -> google.bigtable.v2.Mutation + 16, // 6: google.bigtable.v2.MutateRowsRequest.entries:type_name -> google.bigtable.v2.MutateRowsRequest.Entry + 17, // 7: google.bigtable.v2.MutateRowsResponse.entries:type_name -> google.bigtable.v2.MutateRowsResponse.Entry + 19, // 8: google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter:type_name -> google.bigtable.v2.RowFilter + 21, // 9: google.bigtable.v2.CheckAndMutateRowRequest.true_mutations:type_name -> google.bigtable.v2.Mutation + 21, // 10: google.bigtable.v2.CheckAndMutateRowRequest.false_mutations:type_name -> google.bigtable.v2.Mutation + 22, // 11: google.bigtable.v2.ReadModifyWriteRowRequest.rules:type_name -> google.bigtable.v2.ReadModifyWriteRule + 23, // 12: google.bigtable.v2.ReadModifyWriteRowResponse.row:type_name -> google.bigtable.v2.Row + 24, // 13: 
google.bigtable.v2.ReadRowsResponse.CellChunk.family_name:type_name -> google.protobuf.StringValue + 25, // 14: google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier:type_name -> google.protobuf.BytesValue + 21, // 15: google.bigtable.v2.MutateRowsRequest.Entry.mutations:type_name -> google.bigtable.v2.Mutation + 26, // 16: google.bigtable.v2.MutateRowsResponse.Entry.status:type_name -> google.rpc.Status + 1, // 17: google.bigtable.v2.Bigtable.ReadRows:input_type -> google.bigtable.v2.ReadRowsRequest + 3, // 18: google.bigtable.v2.Bigtable.SampleRowKeys:input_type -> google.bigtable.v2.SampleRowKeysRequest + 5, // 19: google.bigtable.v2.Bigtable.MutateRow:input_type -> google.bigtable.v2.MutateRowRequest + 7, // 20: google.bigtable.v2.Bigtable.MutateRows:input_type -> google.bigtable.v2.MutateRowsRequest + 9, // 21: google.bigtable.v2.Bigtable.CheckAndMutateRow:input_type -> google.bigtable.v2.CheckAndMutateRowRequest + 11, // 22: google.bigtable.v2.Bigtable.PingAndWarm:input_type -> google.bigtable.v2.PingAndWarmRequest + 13, // 23: google.bigtable.v2.Bigtable.ReadModifyWriteRow:input_type -> google.bigtable.v2.ReadModifyWriteRowRequest + 2, // 24: google.bigtable.v2.Bigtable.ReadRows:output_type -> google.bigtable.v2.ReadRowsResponse + 4, // 25: google.bigtable.v2.Bigtable.SampleRowKeys:output_type -> google.bigtable.v2.SampleRowKeysResponse + 6, // 26: google.bigtable.v2.Bigtable.MutateRow:output_type -> google.bigtable.v2.MutateRowResponse + 8, // 27: google.bigtable.v2.Bigtable.MutateRows:output_type -> google.bigtable.v2.MutateRowsResponse + 10, // 28: google.bigtable.v2.Bigtable.CheckAndMutateRow:output_type -> google.bigtable.v2.CheckAndMutateRowResponse + 12, // 29: google.bigtable.v2.Bigtable.PingAndWarm:output_type -> google.bigtable.v2.PingAndWarmResponse + 14, // 30: google.bigtable.v2.Bigtable.ReadModifyWriteRow:output_type -> google.bigtable.v2.ReadModifyWriteRowResponse + 24, // [24:31] is the sub-list for method output_type + 17, // [17:24] is the sub-list for method input_type + 17, // [17:17] is the sub-list for extension type_name + 17, // [17:17] is the sub-list for extension extendee + 0, // [0:17] is the sub-list for field type_name } func init() { file_google_bigtable_v2_bigtable_proto_init() } @@ -1715,6 +1836,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return } file_google_bigtable_v2_data_proto_init() + file_google_bigtable_v2_request_stats_proto_init() if !protoimpl.UnsafeEnabled { file_google_bigtable_v2_bigtable_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadRowsRequest); i { @@ -1930,13 +2052,14 @@ func file_google_bigtable_v2_bigtable_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_v2_bigtable_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 17, NumExtensions: 0, NumServices: 1, }, GoTypes: file_google_bigtable_v2_bigtable_proto_goTypes, DependencyIndexes: file_google_bigtable_v2_bigtable_proto_depIdxs, + EnumInfos: file_google_bigtable_v2_bigtable_proto_enumTypes, MessageInfos: file_google_bigtable_v2_bigtable_proto_msgTypes, }.Build() File_google_bigtable_v2_bigtable_proto = out.File diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/request_stats.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/request_stats.pb.go new file mode 100644 index 0000000000000..e3f27c891360f --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/request_stats.pb.go @@ -0,0 
+1,497 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/bigtable/v2/request_stats.proto + +package bigtable + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// ReadIterationStats captures information about the iteration of rows or cells +// over the course of a read, e.g. how many results were scanned in a read +// operation versus the results returned. +type ReadIterationStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The rows seen (scanned) as part of the request. This includes the count of + // rows returned, as captured below. + RowsSeenCount int64 `protobuf:"varint,1,opt,name=rows_seen_count,json=rowsSeenCount,proto3" json:"rows_seen_count,omitempty"` + // The rows returned as part of the request. + RowsReturnedCount int64 `protobuf:"varint,2,opt,name=rows_returned_count,json=rowsReturnedCount,proto3" json:"rows_returned_count,omitempty"` + // The cells seen (scanned) as part of the request. This includes the count of + // cells returned, as captured below. + CellsSeenCount int64 `protobuf:"varint,3,opt,name=cells_seen_count,json=cellsSeenCount,proto3" json:"cells_seen_count,omitempty"` + // The cells returned as part of the request. + CellsReturnedCount int64 `protobuf:"varint,4,opt,name=cells_returned_count,json=cellsReturnedCount,proto3" json:"cells_returned_count,omitempty"` +} + +func (x *ReadIterationStats) Reset() { + *x = ReadIterationStats{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadIterationStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadIterationStats) ProtoMessage() {} + +func (x *ReadIterationStats) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadIterationStats.ProtoReflect.Descriptor instead. 
+func (*ReadIterationStats) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_request_stats_proto_rawDescGZIP(), []int{0} +} + +func (x *ReadIterationStats) GetRowsSeenCount() int64 { + if x != nil { + return x.RowsSeenCount + } + return 0 +} + +func (x *ReadIterationStats) GetRowsReturnedCount() int64 { + if x != nil { + return x.RowsReturnedCount + } + return 0 +} + +func (x *ReadIterationStats) GetCellsSeenCount() int64 { + if x != nil { + return x.CellsSeenCount + } + return 0 +} + +func (x *ReadIterationStats) GetCellsReturnedCount() int64 { + if x != nil { + return x.CellsReturnedCount + } + return 0 +} + +// RequestLatencyStats provides a measurement of the latency of the request as +// it interacts with different systems over its lifetime, e.g. how long the +// request took to execute within a frontend server. +type RequestLatencyStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The latency measured by the frontend server handling this request, from + // when the request was received, to when this value is sent back in the + // response. For more context on the component that is measuring this latency, + // see: https://cloud.google.com/bigtable/docs/overview + // + // Note: This value may be slightly shorter than the value reported into + // aggregate latency metrics in Monitoring for this request + // (https://cloud.google.com/bigtable/docs/monitoring-instance) as this value + // needs to be sent in the response before the latency measurement including + // that transmission is finalized. + // + // Note: This value includes the end-to-end latency of contacting nodes in + // the targeted cluster, e.g. measuring from when the first byte arrives at + // the frontend server, to when this value is sent back as the last value in + // the response, including any latency incurred by contacting nodes, waiting + // for results from nodes, and finally sending results from nodes back to the + // caller. + FrontendServerLatency *durationpb.Duration `protobuf:"bytes,1,opt,name=frontend_server_latency,json=frontendServerLatency,proto3" json:"frontend_server_latency,omitempty"` +} + +func (x *RequestLatencyStats) Reset() { + *x = RequestLatencyStats{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestLatencyStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestLatencyStats) ProtoMessage() {} + +func (x *RequestLatencyStats) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestLatencyStats.ProtoReflect.Descriptor instead. +func (*RequestLatencyStats) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_request_stats_proto_rawDescGZIP(), []int{1} +} + +func (x *RequestLatencyStats) GetFrontendServerLatency() *durationpb.Duration { + if x != nil { + return x.FrontendServerLatency + } + return nil +} + +// FullReadStatsView captures all known information about a read. 
+type FullReadStatsView struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Iteration stats describe how efficient the read is, e.g. comparing + // rows seen vs. rows returned or cells seen vs cells returned can provide an + // indication of read efficiency (the higher the ratio of seen to retuned the + // better). + ReadIterationStats *ReadIterationStats `protobuf:"bytes,1,opt,name=read_iteration_stats,json=readIterationStats,proto3" json:"read_iteration_stats,omitempty"` + // Request latency stats describe the time taken to complete a request, from + // the server side. + RequestLatencyStats *RequestLatencyStats `protobuf:"bytes,2,opt,name=request_latency_stats,json=requestLatencyStats,proto3" json:"request_latency_stats,omitempty"` +} + +func (x *FullReadStatsView) Reset() { + *x = FullReadStatsView{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FullReadStatsView) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FullReadStatsView) ProtoMessage() {} + +func (x *FullReadStatsView) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FullReadStatsView.ProtoReflect.Descriptor instead. +func (*FullReadStatsView) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_request_stats_proto_rawDescGZIP(), []int{2} +} + +func (x *FullReadStatsView) GetReadIterationStats() *ReadIterationStats { + if x != nil { + return x.ReadIterationStats + } + return nil +} + +func (x *FullReadStatsView) GetRequestLatencyStats() *RequestLatencyStats { + if x != nil { + return x.RequestLatencyStats + } + return nil +} + +// RequestStats is the container for additional information pertaining to a +// single request, helpful for evaluating the performance of the sent request. +// Currently, there are the following supported methods: +// - google.bigtable.v2.ReadRows +type RequestStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Information pertaining to each request type received. The type is chosen + // based on the requested view. + // + // See the messages above for additional context. + // + // Types that are assignable to StatsView: + // + // *RequestStats_FullReadStatsView + StatsView isRequestStats_StatsView `protobuf_oneof:"stats_view"` +} + +func (x *RequestStats) Reset() { + *x = RequestStats{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestStats) ProtoMessage() {} + +func (x *RequestStats) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestStats.ProtoReflect.Descriptor instead. 
+func (*RequestStats) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_request_stats_proto_rawDescGZIP(), []int{3} +} + +func (m *RequestStats) GetStatsView() isRequestStats_StatsView { + if m != nil { + return m.StatsView + } + return nil +} + +func (x *RequestStats) GetFullReadStatsView() *FullReadStatsView { + if x, ok := x.GetStatsView().(*RequestStats_FullReadStatsView); ok { + return x.FullReadStatsView + } + return nil +} + +type isRequestStats_StatsView interface { + isRequestStats_StatsView() +} + +type RequestStats_FullReadStatsView struct { + // Available with the ReadRowsRequest.RequestStatsView.REQUEST_STATS_FULL + // view, see package google.bigtable.v2. + FullReadStatsView *FullReadStatsView `protobuf:"bytes,1,opt,name=full_read_stats_view,json=fullReadStatsView,proto3,oneof"` +} + +func (*RequestStats_FullReadStatsView) isRequestStats_StatsView() {} + +var File_google_bigtable_v2_request_stats_proto protoreflect.FileDescriptor + +var file_google_bigtable_v2_request_stats_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc8, 0x01, 0x0a, + 0x12, 0x52, 0x65, 0x61, 0x64, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x73, 0x65, 0x65, 0x6e, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x72, 0x6f, + 0x77, 0x73, 0x53, 0x65, 0x65, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x72, + 0x6f, 0x77, 0x73, 0x5f, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x72, 0x6f, 0x77, 0x73, 0x52, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x73, 0x65, 0x65, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x53, 0x65, 0x65, 0x6e, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x12, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x68, 0x0a, 0x13, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x51, + 0x0a, 0x17, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x66, 0x72, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x22, 0xca, 0x01, 0x0a, 0x11, 0x46, 0x75, 0x6c, 0x6c, 
0x52, 0x65, 0x61, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x56, 0x69, 0x65, 0x77, 0x12, 0x58, 0x0a, 0x14, 0x72, 0x65, 0x61, 0x64, 0x5f, + 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x49, + 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x12, 0x72, + 0x65, 0x61, 0x64, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x12, 0x5b, 0x0a, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6c, 0x61, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x61, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0x76, + 0x0a, 0x0c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x56, + 0x69, 0x65, 0x77, 0x48, 0x00, 0x52, 0x11, 0x66, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x61, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x56, 0x69, 0x65, 0x77, 0x42, 0x0c, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x42, 0xbd, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, + 0x32, 0x42, 0x11, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_bigtable_v2_request_stats_proto_rawDescOnce sync.Once + file_google_bigtable_v2_request_stats_proto_rawDescData = file_google_bigtable_v2_request_stats_proto_rawDesc +) + +func file_google_bigtable_v2_request_stats_proto_rawDescGZIP() []byte { + file_google_bigtable_v2_request_stats_proto_rawDescOnce.Do(func() { + 
file_google_bigtable_v2_request_stats_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_bigtable_v2_request_stats_proto_rawDescData) + }) + return file_google_bigtable_v2_request_stats_proto_rawDescData +} + +var file_google_bigtable_v2_request_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_bigtable_v2_request_stats_proto_goTypes = []interface{}{ + (*ReadIterationStats)(nil), // 0: google.bigtable.v2.ReadIterationStats + (*RequestLatencyStats)(nil), // 1: google.bigtable.v2.RequestLatencyStats + (*FullReadStatsView)(nil), // 2: google.bigtable.v2.FullReadStatsView + (*RequestStats)(nil), // 3: google.bigtable.v2.RequestStats + (*durationpb.Duration)(nil), // 4: google.protobuf.Duration +} +var file_google_bigtable_v2_request_stats_proto_depIdxs = []int32{ + 4, // 0: google.bigtable.v2.RequestLatencyStats.frontend_server_latency:type_name -> google.protobuf.Duration + 0, // 1: google.bigtable.v2.FullReadStatsView.read_iteration_stats:type_name -> google.bigtable.v2.ReadIterationStats + 1, // 2: google.bigtable.v2.FullReadStatsView.request_latency_stats:type_name -> google.bigtable.v2.RequestLatencyStats + 2, // 3: google.bigtable.v2.RequestStats.full_read_stats_view:type_name -> google.bigtable.v2.FullReadStatsView + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_google_bigtable_v2_request_stats_proto_init() } +func file_google_bigtable_v2_request_stats_proto_init() { + if File_google_bigtable_v2_request_stats_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_bigtable_v2_request_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadIterationStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_request_stats_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestLatencyStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_request_stats_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FullReadStatsView); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_request_stats_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_bigtable_v2_request_stats_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*RequestStats_FullReadStatsView)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_bigtable_v2_request_stats_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_bigtable_v2_request_stats_proto_goTypes, + DependencyIndexes: file_google_bigtable_v2_request_stats_proto_depIdxs, + MessageInfos: file_google_bigtable_v2_request_stats_proto_msgTypes, + }.Build() + 
File_google_bigtable_v2_request_stats_proto = out.File + file_google_bigtable_v2_request_stats_proto_rawDesc = nil + file_google_bigtable_v2_request_stats_proto_goTypes = nil + file_google_bigtable_v2_request_stats_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/response_params.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/response_params.pb.go new file mode 100644 index 0000000000000..7a3c8d680b9fb --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/response_params.pb.go @@ -0,0 +1,191 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/bigtable/v2/response_params.proto + +package bigtable + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Response metadata proto +// This is an experimental feature that will be used to get zone_id and +// cluster_id from response trailers to tag the metrics. This should not be +// used by customers directly +type ResponseParams struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The cloud bigtable zone associated with the cluster. + ZoneId *string `protobuf:"bytes,1,opt,name=zone_id,json=zoneId,proto3,oneof" json:"zone_id,omitempty"` + // Identifier for a cluster that represents set of + // bigtable resources. + ClusterId *string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3,oneof" json:"cluster_id,omitempty"` +} + +func (x *ResponseParams) Reset() { + *x = ResponseParams{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_response_params_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseParams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseParams) ProtoMessage() {} + +func (x *ResponseParams) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_response_params_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseParams.ProtoReflect.Descriptor instead. 
+func (*ResponseParams) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_response_params_proto_rawDescGZIP(), []int{0} +} + +func (x *ResponseParams) GetZoneId() string { + if x != nil && x.ZoneId != nil { + return *x.ZoneId + } + return "" +} + +func (x *ResponseParams) GetClusterId() string { + if x != nil && x.ClusterId != nil { + return *x.ClusterId + } + return "" +} + +var File_google_bigtable_v2_response_params_proto protoreflect.FileDescriptor + +var file_google_bigtable_v2_response_params_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x22, 0x6d, + 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x12, 0x1c, 0x0a, 0x07, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x22, + 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x88, + 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x42, 0x0d, + 0x0a, 0x0b, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x42, 0xbf, 0x01, + 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x13, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, + 0x76, 0x32, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, + 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_bigtable_v2_response_params_proto_rawDescOnce sync.Once + file_google_bigtable_v2_response_params_proto_rawDescData = file_google_bigtable_v2_response_params_proto_rawDesc +) + +func file_google_bigtable_v2_response_params_proto_rawDescGZIP() []byte { + file_google_bigtable_v2_response_params_proto_rawDescOnce.Do(func() { + file_google_bigtable_v2_response_params_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_bigtable_v2_response_params_proto_rawDescData) + }) + return file_google_bigtable_v2_response_params_proto_rawDescData +} + +var file_google_bigtable_v2_response_params_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var 
file_google_bigtable_v2_response_params_proto_goTypes = []interface{}{ + (*ResponseParams)(nil), // 0: google.bigtable.v2.ResponseParams +} +var file_google_bigtable_v2_response_params_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_bigtable_v2_response_params_proto_init() } +func file_google_bigtable_v2_response_params_proto_init() { + if File_google_bigtable_v2_response_params_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_bigtable_v2_response_params_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseParams); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_bigtable_v2_response_params_proto_msgTypes[0].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_bigtable_v2_response_params_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_bigtable_v2_response_params_proto_goTypes, + DependencyIndexes: file_google_bigtable_v2_response_params_proto_depIdxs, + MessageInfos: file_google_bigtable_v2_response_params_proto_msgTypes, + }.Build() + File_google_bigtable_v2_response_params_proto = out.File + file_google_bigtable_v2_response_params_proto_rawDesc = nil + file_google_bigtable_v2_response_params_proto_goTypes = nil + file_google_bigtable_v2_response_params_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go new file mode 100644 index 0000000000000..9fb745926a58f --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go @@ -0,0 +1,208 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by aliasgen. DO NOT EDIT. + +// Package iam aliases all exported identifiers in package +// "cloud.google.com/go/iam/apiv1/iampb". +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb. +// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md +// for more details. 
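Since every identifier in the alias file below is a plain Go type alias (type X = src.X), code that still imports the deprecated google.golang.org/genproto/googleapis/iam/v1 path keeps compiling unchanged, and migrating is essentially an import swap. A minimal sketch under that assumption (the resource name is hypothetical):

package iammigration

import (
	iampb "cloud.google.com/go/iam/apiv1/iampb"         // new canonical location
	iam "google.golang.org/genproto/googleapis/iam/v1"  // deprecated alias package below
)

// requests shows that a value built via the old import path satisfies the new
// types without conversion, because the old names are aliases, not copies.
func requests() (*iam.GetIamPolicyRequest, *iampb.GetIamPolicyRequest) {
	req := &iam.GetIamPolicyRequest{Resource: "projects/my-project"} // hypothetical resource
	var same *iampb.GetIamPolicyRequest = req                        // no cast needed
	return req, same
}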
+package iam + +import ( + src "cloud.google.com/go/iam/apiv1/iampb" + grpc "google.golang.org/grpc" +) + +// Deprecated: Please use consts in: cloud.google.com/go/iam/apiv1/iampb +const ( + AuditConfigDelta_ACTION_UNSPECIFIED = src.AuditConfigDelta_ACTION_UNSPECIFIED + AuditConfigDelta_ADD = src.AuditConfigDelta_ADD + AuditConfigDelta_REMOVE = src.AuditConfigDelta_REMOVE + AuditLogConfig_ADMIN_READ = src.AuditLogConfig_ADMIN_READ + AuditLogConfig_DATA_READ = src.AuditLogConfig_DATA_READ + AuditLogConfig_DATA_WRITE = src.AuditLogConfig_DATA_WRITE + AuditLogConfig_LOG_TYPE_UNSPECIFIED = src.AuditLogConfig_LOG_TYPE_UNSPECIFIED + BindingDelta_ACTION_UNSPECIFIED = src.BindingDelta_ACTION_UNSPECIFIED + BindingDelta_ADD = src.BindingDelta_ADD + BindingDelta_REMOVE = src.BindingDelta_REMOVE +) + +// Deprecated: Please use vars in: cloud.google.com/go/iam/apiv1/iampb +var ( + AuditConfigDelta_Action_name = src.AuditConfigDelta_Action_name + AuditConfigDelta_Action_value = src.AuditConfigDelta_Action_value + AuditLogConfig_LogType_name = src.AuditLogConfig_LogType_name + AuditLogConfig_LogType_value = src.AuditLogConfig_LogType_value + BindingDelta_Action_name = src.BindingDelta_Action_name + BindingDelta_Action_value = src.BindingDelta_Action_value + File_google_iam_v1_iam_policy_proto = src.File_google_iam_v1_iam_policy_proto + File_google_iam_v1_options_proto = src.File_google_iam_v1_options_proto + File_google_iam_v1_policy_proto = src.File_google_iam_v1_policy_proto +) + +// Specifies the audit configuration for a service. The configuration +// determines which permission types are logged, and what identities, if any, +// are exempted from logging. An AuditConfig must have one or more +// AuditLogConfigs. If there are AuditConfigs for both `allServices` and a +// specific service, the union of the two AuditConfigs is used for that +// service: the log_types specified in each AuditConfig are enabled, and the +// exempted_members in each AuditLogConfig are exempted. Example Policy with +// multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": +// "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", +// "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": +// "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For +// sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ +// logging. It also exempts jose@example.com from DATA_READ logging, and +// aliya@example.com from DATA_WRITE logging. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfig = src.AuditConfig + +// One delta entry for AuditConfig. Each individual change (only one +// exempted_member in each entry) to a AuditConfig will be a separate entry. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfigDelta = src.AuditConfigDelta + +// The type of action performed on an audit configuration in a policy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfigDelta_Action = src.AuditConfigDelta_Action + +// Provides the configuration for logging a type of permissions. 
Example: { +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables +// 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from +// DATA_READ logging. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditLogConfig = src.AuditLogConfig + +// The list of valid permission types for which logging can be configured. +// Admin writes are always logged, and are not configurable. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditLogConfig_LogType = src.AuditLogConfig_LogType + +// Associates `members`, or principals, with a `role`. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type Binding = src.Binding + +// One delta entry for Binding. Each individual change (only one member in +// each entry) to a binding will be a separate entry. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type BindingDelta = src.BindingDelta + +// The type of action performed on a Binding in a policy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type BindingDelta_Action = src.BindingDelta_Action + +// Request message for `GetIamPolicy` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type GetIamPolicyRequest = src.GetIamPolicyRequest + +// Encapsulates settings provided to GetIamPolicy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type GetPolicyOptions = src.GetPolicyOptions + +// IAMPolicyClient is the client API for IAMPolicy service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type IAMPolicyClient = src.IAMPolicyClient + +// IAMPolicyServer is the server API for IAMPolicy service. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type IAMPolicyServer = src.IAMPolicyServer + +// An Identity and Access Management (IAM) policy, which specifies access +// controls for Google Cloud resources. A `Policy` is a collection of +// `bindings`. A `binding` binds one or more `members`, or principals, to a +// single `role`. Principals can be user accounts, service accounts, Google +// groups, and domains (such as G Suite). A `role` is a named list of +// permissions; each `role` can be an IAM predefined role or a user-created +// custom role. For some types of Google Cloud resources, a `binding` can also +// specify a `condition`, which is a logical expression that allows access to a +// resource only if the expression evaluates to `true`. A condition can add +// constraints based on attributes of the request, the resource, or both. To +// learn which resources support conditions in their IAM policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
+// **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": +// "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": +// "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - +// user:mike@example.com - group:admins@example.com - domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - user:eve@example.com +// role: roles/resourcemanager.organizationViewer condition: title: expirable +// access description: Does not grant access after Sep 2020 expression: +// request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= +// version: 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type Policy = src.Policy + +// The difference delta between two policies. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type PolicyDelta = src.PolicyDelta + +// Request message for `SetIamPolicy` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type SetIamPolicyRequest = src.SetIamPolicyRequest + +// Request message for `TestIamPermissions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type TestIamPermissionsRequest = src.TestIamPermissionsRequest + +// Response message for `TestIamPermissions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type TestIamPermissionsResponse = src.TestIamPermissionsResponse + +// UnimplementedIAMPolicyServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type UnimplementedIAMPolicyServer = src.UnimplementedIAMPolicyServer + +// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb +func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { + return src.NewIAMPolicyClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb +func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { + src.RegisterIAMPolicyServer(s, srv) +} diff --git a/vendor/google.golang.org/genproto/googleapis/longrunning/alias.go b/vendor/google.golang.org/genproto/googleapis/longrunning/alias.go new file mode 100644 index 0000000000000..3addf3b11b1db --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/longrunning/alias.go @@ -0,0 +1,115 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by aliasgen. DO NOT EDIT. + +// Package longrunning aliases all exported identifiers in package +// "cloud.google.com/go/longrunning/autogen/longrunningpb". +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb. +// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md +// for more details. +package longrunning + +import ( + src "cloud.google.com/go/longrunning/autogen/longrunningpb" + grpc "google.golang.org/grpc" +) + +// Deprecated: Please use vars in: cloud.google.com/go/longrunning/autogen/longrunningpb +var ( + E_OperationInfo = src.E_OperationInfo + File_google_longrunning_operations_proto = src.File_google_longrunning_operations_proto +) + +// The request message for +// [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type CancelOperationRequest = src.CancelOperationRequest + +// The request message for +// [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type DeleteOperationRequest = src.DeleteOperationRequest + +// The request message for +// [Operations.GetOperation][google.longrunning.Operations.GetOperation]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type GetOperationRequest = src.GetOperationRequest + +// The request message for +// [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type ListOperationsRequest = src.ListOperationsRequest + +// The response message for +// [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type ListOperationsResponse = src.ListOperationsResponse + +// This resource represents a long-running operation that is the result of a +// network API call. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type Operation = src.Operation + +// A message representing the message types used by a long-running operation. +// Example: rpc LongRunningRecognize(LongRunningRecognizeRequest) returns +// (google.longrunning.Operation) { option (google.longrunning.operation_info) +// = { response_type: "LongRunningRecognizeResponse" metadata_type: +// "LongRunningRecognizeMetadata" }; } +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type OperationInfo = src.OperationInfo +type Operation_Error = src.Operation_Error +type Operation_Response = src.Operation_Response + +// OperationsClient is the client API for Operations service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type OperationsClient = src.OperationsClient + +// OperationsServer is the server API for Operations service. 
+// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type OperationsServer = src.OperationsServer + +// UnimplementedOperationsServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type UnimplementedOperationsServer = src.UnimplementedOperationsServer + +// The request message for +// [Operations.WaitOperation][google.longrunning.Operations.WaitOperation]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type WaitOperationRequest = src.WaitOperationRequest + +// Deprecated: Please use funcs in: cloud.google.com/go/longrunning/autogen/longrunningpb +func NewOperationsClient(cc grpc.ClientConnInterface) OperationsClient { + return src.NewOperationsClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/longrunning/autogen/longrunningpb +func RegisterOperationsServer(s *grpc.Server, srv OperationsServer) { + src.RegisterOperationsServer(s, srv) +} diff --git a/vendor/google.golang.org/genproto/googleapis/pubsub/v1/alias.go b/vendor/google.golang.org/genproto/googleapis/pubsub/v1/alias.go new file mode 100644 index 0000000000000..0fc2391b85a03 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/pubsub/v1/alias.go @@ -0,0 +1,503 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by aliasgen. DO NOT EDIT. + +// Package pubsub aliases all exported identifiers in package +// "cloud.google.com/go/pubsub/apiv1/pubsubpb". +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb. +// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md +// for more details. 
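The Pub/Sub alias package that starts below works the same way: the canonical generated types now live in cloud.google.com/go/pubsub/apiv1/pubsubpb, and the New*Client / Register*Server helpers here only forward to that package. A rough sketch of driving the raw Subscriber RPC surface through the new path, assuming an already-dialed *grpc.ClientConn (endpoint, credentials, and the subscription name are placeholders):

package pubsubexample

import (
	"context"
	"log"

	pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/grpc"
)

// pullOnce issues a single Pull against a subscription. Calling
// NewSubscriberClient from the deprecated alias package would behave
// identically, since that constructor forwards to this one.
func pullOnce(ctx context.Context, conn *grpc.ClientConn) {
	sub := pubsubpb.NewSubscriberClient(conn)

	resp, err := sub.Pull(ctx, &pubsubpb.PullRequest{
		Subscription: "projects/my-project/subscriptions/my-sub", // hypothetical
		MaxMessages:  10,
	})
	if err != nil {
		log.Fatalf("Pull: %v", err)
	}
	for _, m := range resp.GetReceivedMessages() {
		log.Printf("ack_id=%s data=%s", m.GetAckId(), m.GetMessage().GetData())
	}
}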
+package pubsub + +import ( + src "cloud.google.com/go/pubsub/apiv1/pubsubpb" + grpc "google.golang.org/grpc" +) + +// Deprecated: Please use consts in: cloud.google.com/go/pubsub/apiv1/pubsubpb +const ( + BigQueryConfig_ACTIVE = src.BigQueryConfig_ACTIVE + BigQueryConfig_NOT_FOUND = src.BigQueryConfig_NOT_FOUND + BigQueryConfig_PERMISSION_DENIED = src.BigQueryConfig_PERMISSION_DENIED + BigQueryConfig_SCHEMA_MISMATCH = src.BigQueryConfig_SCHEMA_MISMATCH + BigQueryConfig_STATE_UNSPECIFIED = src.BigQueryConfig_STATE_UNSPECIFIED + Encoding_BINARY = src.Encoding_BINARY + Encoding_ENCODING_UNSPECIFIED = src.Encoding_ENCODING_UNSPECIFIED + Encoding_JSON = src.Encoding_JSON + SchemaView_BASIC = src.SchemaView_BASIC + SchemaView_FULL = src.SchemaView_FULL + SchemaView_SCHEMA_VIEW_UNSPECIFIED = src.SchemaView_SCHEMA_VIEW_UNSPECIFIED + Schema_AVRO = src.Schema_AVRO + Schema_PROTOCOL_BUFFER = src.Schema_PROTOCOL_BUFFER + Schema_TYPE_UNSPECIFIED = src.Schema_TYPE_UNSPECIFIED + Subscription_ACTIVE = src.Subscription_ACTIVE + Subscription_RESOURCE_ERROR = src.Subscription_RESOURCE_ERROR + Subscription_STATE_UNSPECIFIED = src.Subscription_STATE_UNSPECIFIED +) + +// Deprecated: Please use vars in: cloud.google.com/go/pubsub/apiv1/pubsubpb +var ( + BigQueryConfig_State_name = src.BigQueryConfig_State_name + BigQueryConfig_State_value = src.BigQueryConfig_State_value + Encoding_name = src.Encoding_name + Encoding_value = src.Encoding_value + File_google_pubsub_v1_pubsub_proto = src.File_google_pubsub_v1_pubsub_proto + File_google_pubsub_v1_schema_proto = src.File_google_pubsub_v1_schema_proto + SchemaView_name = src.SchemaView_name + SchemaView_value = src.SchemaView_value + Schema_Type_name = src.Schema_Type_name + Schema_Type_value = src.Schema_Type_value + Subscription_State_name = src.Subscription_State_name + Subscription_State_value = src.Subscription_State_value +) + +// Request for the Acknowledge method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type AcknowledgeRequest = src.AcknowledgeRequest + +// Configuration for a BigQuery subscription. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type BigQueryConfig = src.BigQueryConfig + +// Possible states for a BigQuery subscription. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type BigQueryConfig_State = src.BigQueryConfig_State + +// Request for the CreateSchema method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type CreateSchemaRequest = src.CreateSchemaRequest + +// Request for the `CreateSnapshot` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type CreateSnapshotRequest = src.CreateSnapshotRequest + +// Dead lettering is done on a best effort basis. The same message might be +// dead lettered multiple times. If validation on any of the fields fails at +// subscription creation/updation, the create/update subscription request will +// fail. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DeadLetterPolicy = src.DeadLetterPolicy + +// Request for the `DeleteSchema` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DeleteSchemaRequest = src.DeleteSchemaRequest + +// Request for the `DeleteSnapshot` method. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DeleteSnapshotRequest = src.DeleteSnapshotRequest + +// Request for the DeleteSubscription method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DeleteSubscriptionRequest = src.DeleteSubscriptionRequest + +// Request for the `DeleteTopic` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DeleteTopicRequest = src.DeleteTopicRequest + +// Request for the DetachSubscription method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DetachSubscriptionRequest = src.DetachSubscriptionRequest + +// Response for the DetachSubscription method. Reserved for future use. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DetachSubscriptionResponse = src.DetachSubscriptionResponse + +// Possible encoding types for messages. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Encoding = src.Encoding + +// A policy that specifies the conditions for resource expiration (i.e., +// automatic resource deletion). +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ExpirationPolicy = src.ExpirationPolicy + +// Request for the GetSchema method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type GetSchemaRequest = src.GetSchemaRequest + +// Request for the GetSnapshot method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type GetSnapshotRequest = src.GetSnapshotRequest + +// Request for the GetSubscription method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type GetSubscriptionRequest = src.GetSubscriptionRequest + +// Request for the GetTopic method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type GetTopicRequest = src.GetTopicRequest + +// Request for the `ListSchemas` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListSchemasRequest = src.ListSchemasRequest + +// Response for the `ListSchemas` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListSchemasResponse = src.ListSchemasResponse + +// Request for the `ListSnapshots` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListSnapshotsRequest = src.ListSnapshotsRequest + +// Response for the `ListSnapshots` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListSnapshotsResponse = src.ListSnapshotsResponse + +// Request for the `ListSubscriptions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListSubscriptionsRequest = src.ListSubscriptionsRequest + +// Response for the `ListSubscriptions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListSubscriptionsResponse = src.ListSubscriptionsResponse + +// Request for the `ListTopicSnapshots` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListTopicSnapshotsRequest = src.ListTopicSnapshotsRequest + +// Response for the `ListTopicSnapshots` method. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListTopicSnapshotsResponse = src.ListTopicSnapshotsResponse + +// Request for the `ListTopicSubscriptions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListTopicSubscriptionsRequest = src.ListTopicSubscriptionsRequest + +// Response for the `ListTopicSubscriptions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListTopicSubscriptionsResponse = src.ListTopicSubscriptionsResponse + +// Request for the `ListTopics` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListTopicsRequest = src.ListTopicsRequest + +// Response for the `ListTopics` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListTopicsResponse = src.ListTopicsResponse + +// A policy constraining the storage of messages published to the topic. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type MessageStoragePolicy = src.MessageStoragePolicy + +// Request for the ModifyAckDeadline method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ModifyAckDeadlineRequest = src.ModifyAckDeadlineRequest + +// Request for the ModifyPushConfig method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ModifyPushConfigRequest = src.ModifyPushConfigRequest + +// Request for the Publish method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PublishRequest = src.PublishRequest + +// Response for the `Publish` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PublishResponse = src.PublishResponse + +// PublisherClient is the client API for Publisher service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PublisherClient = src.PublisherClient + +// PublisherServer is the server API for Publisher service. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PublisherServer = src.PublisherServer + +// A message that is published by publishers and consumed by subscribers. The +// message must contain either a non-empty data field or at least one +// attribute. Note that client libraries represent this object differently +// depending on the language. See the corresponding [client library +// documentation](https://cloud.google.com/pubsub/docs/reference/libraries) for +// more information. See [quotas and limits] +// (https://cloud.google.com/pubsub/quotas) for more information about message +// limits. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PubsubMessage = src.PubsubMessage + +// Request for the `Pull` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PullRequest = src.PullRequest + +// Response for the `Pull` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PullResponse = src.PullResponse + +// Configuration for a push delivery endpoint. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PushConfig = src.PushConfig + +// Contains information needed for generating an [OpenID Connect +// token](https://developers.google.com/identity/protocols/OpenIDConnect). +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PushConfig_OidcToken = src.PushConfig_OidcToken +type PushConfig_OidcToken_ = src.PushConfig_OidcToken_ + +// A message and its corresponding acknowledgment ID. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ReceivedMessage = src.ReceivedMessage + +// A policy that specifies how Cloud Pub/Sub retries message delivery. Retry +// delay will be exponential based on provided minimum and maximum backoffs. +// https://en.wikipedia.org/wiki/Exponential_backoff. RetryPolicy will be +// triggered on NACKs or acknowledgement deadline exceeded events for a given +// message. Retry Policy is implemented on a best effort basis. At times, the +// delay between consecutive deliveries may not match the configuration. That +// is, delay can be more or less than configured backoff. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type RetryPolicy = src.RetryPolicy + +// A schema resource. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Schema = src.Schema + +// SchemaServiceClient is the client API for SchemaService service. For +// semantics around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SchemaServiceClient = src.SchemaServiceClient + +// SchemaServiceServer is the server API for SchemaService service. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SchemaServiceServer = src.SchemaServiceServer + +// Settings for validating messages published against a schema. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SchemaSettings = src.SchemaSettings + +// View of Schema object fields to be returned by GetSchema and ListSchemas. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SchemaView = src.SchemaView + +// Possible schema definition types. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Schema_Type = src.Schema_Type + +// Request for the `Seek` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SeekRequest = src.SeekRequest +type SeekRequest_Snapshot = src.SeekRequest_Snapshot +type SeekRequest_Time = src.SeekRequest_Time + +// Response for the `Seek` method (this response is empty). +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SeekResponse = src.SeekResponse + +// A snapshot resource. Snapshots are used in +// [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, +// which allow you to manage message acknowledgments in bulk. That is, you can +// set the acknowledgment state of messages in an existing subscription to the +// state captured by a snapshot. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Snapshot = src.Snapshot + +// Request for the `StreamingPull` streaming RPC method. 
This request is used +// to establish the initial stream as well as to stream acknowledgements and +// ack deadline modifications from the client to the server. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type StreamingPullRequest = src.StreamingPullRequest + +// Response for the `StreamingPull` method. This response is used to stream +// messages from the server to the client. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type StreamingPullResponse = src.StreamingPullResponse + +// Acknowledgement IDs sent in one or more previous requests to acknowledge a +// previously received message. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type StreamingPullResponse_AcknowledgeConfirmation = src.StreamingPullResponse_AcknowledgeConfirmation + +// Acknowledgement IDs sent in one or more previous requests to modify the +// deadline for a specific message. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type StreamingPullResponse_ModifyAckDeadlineConfirmation = src.StreamingPullResponse_ModifyAckDeadlineConfirmation + +// Subscription properties sent as part of the response. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type StreamingPullResponse_SubscriptionProperties = src.StreamingPullResponse_SubscriptionProperties + +// SubscriberClient is the client API for Subscriber service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SubscriberClient = src.SubscriberClient + +// SubscriberServer is the server API for Subscriber service. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SubscriberServer = src.SubscriberServer +type Subscriber_StreamingPullClient = src.Subscriber_StreamingPullClient +type Subscriber_StreamingPullServer = src.Subscriber_StreamingPullServer + +// A subscription resource. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Subscription = src.Subscription + +// Possible states for a subscription. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Subscription_State = src.Subscription_State + +// A topic resource. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Topic = src.Topic + +// UnimplementedPublisherServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type UnimplementedPublisherServer = src.UnimplementedPublisherServer + +// UnimplementedSchemaServiceServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type UnimplementedSchemaServiceServer = src.UnimplementedSchemaServiceServer + +// UnimplementedSubscriberServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type UnimplementedSubscriberServer = src.UnimplementedSubscriberServer + +// Request for the UpdateSnapshot method. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type UpdateSnapshotRequest = src.UpdateSnapshotRequest + +// Request for the UpdateSubscription method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type UpdateSubscriptionRequest = src.UpdateSubscriptionRequest + +// Request for the UpdateTopic method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type UpdateTopicRequest = src.UpdateTopicRequest + +// Request for the `ValidateMessage` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ValidateMessageRequest = src.ValidateMessageRequest +type ValidateMessageRequest_Name = src.ValidateMessageRequest_Name +type ValidateMessageRequest_Schema = src.ValidateMessageRequest_Schema + +// Response for the `ValidateMessage` method. Empty for now. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ValidateMessageResponse = src.ValidateMessageResponse + +// Request for the `ValidateSchema` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ValidateSchemaRequest = src.ValidateSchemaRequest + +// Response for the `ValidateSchema` method. Empty for now. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ValidateSchemaResponse = src.ValidateSchemaResponse + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/apiv1/pubsubpb +func NewPublisherClient(cc grpc.ClientConnInterface) PublisherClient { + return src.NewPublisherClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/apiv1/pubsubpb +func NewSchemaServiceClient(cc grpc.ClientConnInterface) SchemaServiceClient { + return src.NewSchemaServiceClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/apiv1/pubsubpb +func NewSubscriberClient(cc grpc.ClientConnInterface) SubscriberClient { + return src.NewSubscriberClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/apiv1/pubsubpb +func RegisterPublisherServer(s *grpc.Server, srv PublisherServer) { + src.RegisterPublisherServer(s, srv) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/apiv1/pubsubpb +func RegisterSchemaServiceServer(s *grpc.Server, srv SchemaServiceServer) { + src.RegisterSchemaServiceServer(s, srv) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/apiv1/pubsubpb +func RegisterSubscriberServer(s *grpc.Server, srv SubscriberServer) { + src.RegisterSubscriberServer(s, srv) +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index 1258803152f3e..cc5d52fbcc3a2 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/code.proto package code @@ -37,7 +37,6 @@ const ( // The canonical error codes for gRPC APIs. // -// // Sometimes multiple error codes may apply. Services should return // the most specific error code that applies. 
For example, prefer // `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. @@ -45,7 +44,7 @@ const ( type Code int32 const ( - // Not an error; returned on success + // Not an error; returned on success. // // HTTP Mapping: 200 OK Code_OK Code = 0 @@ -79,7 +78,7 @@ const ( // Some requested entity (e.g., file or directory) was not found. // // Note to server developers: if a request is denied for an entire class - // of users, such as gradual feature rollout or undocumented whitelist, + // of users, such as gradual feature rollout or undocumented allowlist, // `NOT_FOUND` may be used. If a request is denied for some users within // a class of users, such as user-based access control, `PERMISSION_DENIED` // must be used. @@ -119,15 +118,16 @@ const ( // // Service implementors can use the following guidelines to decide // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: - // (a) Use `UNAVAILABLE` if the client can retry just the failing call. - // (b) Use `ABORTED` if the client should retry at a higher level - // (e.g., when a client-specified test-and-set fails, indicating the - // client should restart a read-modify-write sequence). - // (c) Use `FAILED_PRECONDITION` if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" - // fails because the directory is non-empty, `FAILED_PRECONDITION` - // should be returned since the client should not retry unless - // the files are deleted from the directory. + // + // (a) Use `UNAVAILABLE` if the client can retry just the failing call. + // (b) Use `ABORTED` if the client should retry at a higher level. For + // example, when a client-specified test-and-set fails, indicating the + // client should restart a read-modify-write sequence. + // (c) Use `FAILED_PRECONDITION` if the client should not retry until + // the system state has been explicitly fixed. For example, if an "rmdir" + // fails because the directory is non-empty, `FAILED_PRECONDITION` + // should be returned since the client should not retry unless + // the files are deleted from the directory. // // HTTP Mapping: 400 Bad Request Code_FAILED_PRECONDITION Code = 9 diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 1c7b93ec160b2..7bd161e48ad7a 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/error_details.proto package errdetails @@ -36,6 +36,112 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// Describes the cause of the error with structured details. +// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. 
+// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +type ErrorInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The reason of the error. This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match a + // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents + // UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + // The logical grouping to which the "reason" belongs. The error domain + // is typically the registered service name of the tool or product that + // generates the error. Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + // Additional structured details about this error. + // + // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // instances that can be created in a single (batch) request. + Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ErrorInfo) Reset() { + *x = ErrorInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorInfo) ProtoMessage() {} + +func (x *ErrorInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. +func (*ErrorInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} +} + +func (x *ErrorInfo) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *ErrorInfo) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ErrorInfo) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + // Describes when the clients can retry a failed request. Clients could ignore // the recommendation here or retry when this information is missing from error // responses. 
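On the client side, an ErrorInfo like the one defined above usually arrives as a detail attached to the google.rpc.Status carried by a failed RPC. A small sketch of unpacking it from a returned error, assuming the server actually attached the detail:

package errinfoexample

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/status"
)

// describe reports the reason, domain, and metadata of any ErrorInfo detail
// attached to a gRPC error; other detail types are ignored.
func describe(err error) {
	s, ok := status.FromError(err)
	if !ok {
		fmt.Println("not a gRPC status error:", err)
		return
	}
	for _, d := range s.Details() {
		if info, ok := d.(*errdetails.ErrorInfo); ok {
			fmt.Printf("reason=%s domain=%s metadata=%v\n",
				info.GetReason(), info.GetDomain(), info.GetMetadata())
		}
	}
}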
@@ -61,7 +167,7 @@ type RetryInfo struct { func (x *RetryInfo) Reset() { *x = RetryInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[0] + mi := &file_google_rpc_error_details_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -74,7 +180,7 @@ func (x *RetryInfo) String() string { func (*RetryInfo) ProtoMessage() {} func (x *RetryInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[0] + mi := &file_google_rpc_error_details_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -87,7 +193,7 @@ func (x *RetryInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead. func (*RetryInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} } func (x *RetryInfo) GetRetryDelay() *durationpb.Duration { @@ -112,7 +218,7 @@ type DebugInfo struct { func (x *DebugInfo) Reset() { *x = DebugInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[1] + mi := &file_google_rpc_error_details_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -125,7 +231,7 @@ func (x *DebugInfo) String() string { func (*DebugInfo) ProtoMessage() {} func (x *DebugInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[1] + mi := &file_google_rpc_error_details_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -138,7 +244,7 @@ func (x *DebugInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead. func (*DebugInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} } func (x *DebugInfo) GetStackEntries() []string { @@ -178,7 +284,7 @@ type QuotaFailure struct { func (x *QuotaFailure) Reset() { *x = QuotaFailure{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[2] + mi := &file_google_rpc_error_details_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -191,7 +297,7 @@ func (x *QuotaFailure) String() string { func (*QuotaFailure) ProtoMessage() {} func (x *QuotaFailure) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[2] + mi := &file_google_rpc_error_details_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -204,7 +310,7 @@ func (x *QuotaFailure) ProtoReflect() protoreflect.Message { // Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead. func (*QuotaFailure) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} } func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { @@ -214,111 +320,6 @@ func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { return nil } -// Describes the cause of the error with structured details. 
-// -// Example of an error when contacting the "pubsub.googleapis.com" API when it -// is not enabled: -// -// { "reason": "API_DISABLED" -// "domain": "googleapis.com" -// "metadata": { -// "resource": "projects/123", -// "service": "pubsub.googleapis.com" -// } -// } -// -// This response indicates that the pubsub.googleapis.com API is not enabled. -// -// Example of an error that is returned when attempting to create a Spanner -// instance in a region that is out of stock: -// -// { "reason": "STOCKOUT" -// "domain": "spanner.googleapis.com", -// "metadata": { -// "availableRegions": "us-central1,us-east2" -// } -// } -type ErrorInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The reason of the error. This is a constant value that identifies the - // proximate cause of the error. Error reasons are unique within a particular - // domain of errors. This should be at most 63 characters and match - // /[A-Z0-9_]+/. - Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` - // The logical grouping to which the "reason" belongs. The error domain - // is typically the registered service name of the tool or product that - // generates the error. Example: "pubsub.googleapis.com". If the error is - // generated by some common infrastructure, the error domain must be a - // globally unique value that identifies the infrastructure. For Google API - // infrastructure, the error domain is "googleapis.com". - Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` - // Additional structured details about this error. - // - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in - // length. When identifying the current value of an exceeded limit, the units - // should be contained in the key, not the value. For example, rather than - // {"instanceLimit": "100/request"}, should be returned as, - // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of - // instances that can be created in a single (batch) request. - Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *ErrorInfo) Reset() { - *x = ErrorInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ErrorInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ErrorInfo) ProtoMessage() {} - -func (x *ErrorInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. -func (*ErrorInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} -} - -func (x *ErrorInfo) GetReason() string { - if x != nil { - return x.Reason - } - return "" -} - -func (x *ErrorInfo) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *ErrorInfo) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - // Describes what preconditions have failed. 
// // For example, if an RPC failed because it required the Terms of Service to be @@ -495,7 +496,8 @@ type ResourceInfo struct { ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` // The name of the resource being accessed. For example, a shared calendar // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current - // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + // error is + // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` // The owner of the resource (optional). // For example, "user:" or "project: google.protobuf.Duration - 10, // 1: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation - 11, // 2: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link @@ -1089,7 +1125,7 @@ func file_google_rpc_error_details_proto_init() { } if !protoimpl.UnsafeEnabled { file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RetryInfo); i { + switch v := v.(*ErrorInfo); i { case 0: return &v.state case 1: @@ -1101,7 +1137,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DebugInfo); i { + switch v := v.(*RetryInfo); i { case 0: return &v.state case 1: @@ -1113,7 +1149,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QuotaFailure); i { + switch v := v.(*DebugInfo); i { case 0: return &v.state case 1: @@ -1125,7 +1161,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorInfo); i { + switch v := v.(*QuotaFailure); i { case 0: return &v.state case 1: @@ -1208,7 +1244,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QuotaFailure_Violation); i { case 0: return &v.state diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index f34a38e4e95f6..a6b5081888ba4 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in 
compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/status.proto package status @@ -48,11 +48,13 @@ type Status struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. diff --git a/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go new file mode 100644 index 0000000000000..72afd8b000e5a --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go @@ -0,0 +1,200 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/type/date.proto + +package date + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Represents a whole or partial calendar date, such as a birthday. The time of +// day and time zone are either specified elsewhere or are insignificant. The +// date is relative to the Gregorian Calendar. This can represent one of the +// following: +// +// * A full date, with non-zero year, month, and day values +// * A month and day value, with a zero year, such as an anniversary +// * A year on its own, with zero month and day values +// * A year and month value, with a zero day, such as a credit card expiration +// date +// +// Related types are [google.type.TimeOfDay][google.type.TimeOfDay] and +// `google.protobuf.Timestamp`. +type Date struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Year of the date. Must be from 1 to 9999, or 0 to specify a date without + // a year. 
+ Year int32 `protobuf:"varint,1,opt,name=year,proto3" json:"year,omitempty"` + // Month of a year. Must be from 1 to 12, or 0 to specify a year without a + // month and day. + Month int32 `protobuf:"varint,2,opt,name=month,proto3" json:"month,omitempty"` + // Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 + // to specify a year by itself or a year and month where the day isn't + // significant. + Day int32 `protobuf:"varint,3,opt,name=day,proto3" json:"day,omitempty"` +} + +func (x *Date) Reset() { + *x = Date{} + if protoimpl.UnsafeEnabled { + mi := &file_google_type_date_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Date) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Date) ProtoMessage() {} + +func (x *Date) ProtoReflect() protoreflect.Message { + mi := &file_google_type_date_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Date.ProtoReflect.Descriptor instead. +func (*Date) Descriptor() ([]byte, []int) { + return file_google_type_date_proto_rawDescGZIP(), []int{0} +} + +func (x *Date) GetYear() int32 { + if x != nil { + return x.Year + } + return 0 +} + +func (x *Date) GetMonth() int32 { + if x != nil { + return x.Month + } + return 0 +} + +func (x *Date) GetDay() int32 { + if x != nil { + return x.Day + } + return 0 +} + +var File_google_type_date_proto protoreflect.FileDescriptor + +var file_google_type_date_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x22, 0x42, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x79, 0x65, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x79, 0x65, 0x61, + 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x61, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x64, 0x61, 0x79, 0x42, 0x5d, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x42, 0x09, 0x44, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3b, 0x64, 0x61, 0x74, 0x65, 0xf8, + 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_type_date_proto_rawDescOnce sync.Once + file_google_type_date_proto_rawDescData = file_google_type_date_proto_rawDesc +) + +func file_google_type_date_proto_rawDescGZIP() []byte { + file_google_type_date_proto_rawDescOnce.Do(func() { + file_google_type_date_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_date_proto_rawDescData) + }) + return file_google_type_date_proto_rawDescData +} + +var file_google_type_date_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_type_date_proto_goTypes = []interface{}{ + (*Date)(nil), // 0: 
google.type.Date +} +var file_google_type_date_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_type_date_proto_init() } +func file_google_type_date_proto_init() { + if File_google_type_date_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_type_date_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Date); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_type_date_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_type_date_proto_goTypes, + DependencyIndexes: file_google_type_date_proto_depIdxs, + MessageInfos: file_google_type_date_proto_msgTypes, + }.Build() + File_google_type_date_proto = out.File + file_google_type_date_proto_rawDesc = nil + file_google_type_date_proto_goTypes = nil + file_google_type_date_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go index 2857e26eb386e..38ef56f73cadd 100644 --- a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go @@ -41,27 +41,27 @@ const ( // // Example (Comparison): // -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" +// title: "Summary size limit" +// description: "Determines if a summary is less than 100 chars" +// expression: "document.summary.size() < 100" // // Example (Equality): // -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == request.auth.claims.email" +// title: "Requestor is owner" +// description: "Determines if requestor is the document owner" +// expression: "document.owner == request.auth.claims.email" // // Example (Logic): // -// title: "Public documents" -// description: "Determine whether the document should be publicly visible" -// expression: "document.type != 'private' && document.type != 'internal'" +// title: "Public documents" +// description: "Determine whether the document should be publicly visible" +// expression: "document.type != 'private' && document.type != 'internal'" // // Example (Data Manipulation): // -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + string(document.create_time)" +// title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + string(document.create_time)" // // The exact variables and functions that may be referenced within an expression // are determined by the service that evaluates it. 
See the service diff --git a/vendor/google.golang.org/protobuf/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS deleted file mode 100644 index 2b00ddba0dfee..0000000000000 --- a/vendor/google.golang.org/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/google.golang.org/protobuf/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS deleted file mode 100644 index 1fbd3e976faf5..0000000000000 --- a/vendor/google.golang.org/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 07da5db3450e8..5f28148d805b3 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -19,7 +19,7 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/set" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -113,7 +113,7 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { } // unmarshalMessage unmarshals a message into the given protoreflect.Message. -func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { +func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { return unmarshal(d, m) } @@ -159,10 +159,10 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { } // Get the FieldDescriptor. - var fd pref.FieldDescriptor + var fd protoreflect.FieldDescriptor if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { // Only extension names are in [name] format. - extName := pref.FullName(name[1 : len(name)-1]) + extName := protoreflect.FullName(name[1 : len(name)-1]) extType, err := d.opts.Resolver.FindExtensionByName(extName) if err != nil && err != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) @@ -240,23 +240,23 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { } } -func isKnownValue(fd pref.FieldDescriptor) bool { +func isKnownValue(fd protoreflect.FieldDescriptor) bool { md := fd.Message() return md != nil && md.FullName() == genid.Value_message_fullname } -func isNullValue(fd pref.FieldDescriptor) bool { +func isNullValue(fd protoreflect.FieldDescriptor) bool { ed := fd.Enum() return ed != nil && ed.FullName() == genid.NullValue_enum_fullname } // unmarshalSingular unmarshals to the non-repeated field specified // by the given FieldDescriptor. 
-func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) error { - var val pref.Value +func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { + var val protoreflect.Value var err error switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: val = m.NewField(fd) err = d.unmarshalMessage(val.Message(), false) default: @@ -272,63 +272,63 @@ func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) erro // unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by // the given FieldDescriptor. -func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { +func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { const b32 int = 32 const b64 int = 64 tok, err := d.Read() if err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: if tok.Kind() == json.Bool { - return pref.ValueOfBool(tok.Bool()), nil + return protoreflect.ValueOfBool(tok.Bool()), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if v, ok := unmarshalInt(tok, b32); ok { return v, nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if v, ok := unmarshalInt(tok, b64); ok { return v, nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if v, ok := unmarshalUint(tok, b32); ok { return v, nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if v, ok := unmarshalUint(tok, b64); ok { return v, nil } - case pref.FloatKind: + case protoreflect.FloatKind: if v, ok := unmarshalFloat(tok, b32); ok { return v, nil } - case pref.DoubleKind: + case protoreflect.DoubleKind: if v, ok := unmarshalFloat(tok, b64); ok { return v, nil } - case pref.StringKind: + case protoreflect.StringKind: if tok.Kind() == json.String { - return pref.ValueOfString(tok.ParsedString()), nil + return protoreflect.ValueOfString(tok.ParsedString()), nil } - case pref.BytesKind: + case protoreflect.BytesKind: if v, ok := unmarshalBytes(tok); ok { return v, nil } - case pref.EnumKind: + case protoreflect.EnumKind: if v, ok := unmarshalEnum(tok, fd); ok { return v, nil } @@ -337,10 +337,10 @@ func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) } - return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) } -func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) { +func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { switch tok.Kind() { case json.Number: return getInt(tok, bitSize) @@ -349,30 +349,30 @@ func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) { // Decode number from string. 
s := strings.TrimSpace(tok.ParsedString()) if len(s) != len(tok.ParsedString()) { - return pref.Value{}, false + return protoreflect.Value{}, false } dec := json.NewDecoder([]byte(s)) tok, err := dec.Read() if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } return getInt(tok, bitSize) } - return pref.Value{}, false + return protoreflect.Value{}, false } -func getInt(tok json.Token, bitSize int) (pref.Value, bool) { +func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { n, ok := tok.Int(bitSize) if !ok { - return pref.Value{}, false + return protoreflect.Value{}, false } if bitSize == 32 { - return pref.ValueOfInt32(int32(n)), true + return protoreflect.ValueOfInt32(int32(n)), true } - return pref.ValueOfInt64(n), true + return protoreflect.ValueOfInt64(n), true } -func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) { +func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { switch tok.Kind() { case json.Number: return getUint(tok, bitSize) @@ -381,30 +381,30 @@ func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) { // Decode number from string. s := strings.TrimSpace(tok.ParsedString()) if len(s) != len(tok.ParsedString()) { - return pref.Value{}, false + return protoreflect.Value{}, false } dec := json.NewDecoder([]byte(s)) tok, err := dec.Read() if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } return getUint(tok, bitSize) } - return pref.Value{}, false + return protoreflect.Value{}, false } -func getUint(tok json.Token, bitSize int) (pref.Value, bool) { +func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { n, ok := tok.Uint(bitSize) if !ok { - return pref.Value{}, false + return protoreflect.Value{}, false } if bitSize == 32 { - return pref.ValueOfUint32(uint32(n)), true + return protoreflect.ValueOfUint32(uint32(n)), true } - return pref.ValueOfUint64(n), true + return protoreflect.ValueOfUint64(n), true } -func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) { +func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { switch tok.Kind() { case json.Number: return getFloat(tok, bitSize) @@ -414,49 +414,49 @@ func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) { switch s { case "NaN": if bitSize == 32 { - return pref.ValueOfFloat32(float32(math.NaN())), true + return protoreflect.ValueOfFloat32(float32(math.NaN())), true } - return pref.ValueOfFloat64(math.NaN()), true + return protoreflect.ValueOfFloat64(math.NaN()), true case "Infinity": if bitSize == 32 { - return pref.ValueOfFloat32(float32(math.Inf(+1))), true + return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true } - return pref.ValueOfFloat64(math.Inf(+1)), true + return protoreflect.ValueOfFloat64(math.Inf(+1)), true case "-Infinity": if bitSize == 32 { - return pref.ValueOfFloat32(float32(math.Inf(-1))), true + return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true } - return pref.ValueOfFloat64(math.Inf(-1)), true + return protoreflect.ValueOfFloat64(math.Inf(-1)), true } // Decode number from string. 
if len(s) != len(strings.TrimSpace(s)) { - return pref.Value{}, false + return protoreflect.Value{}, false } dec := json.NewDecoder([]byte(s)) tok, err := dec.Read() if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } return getFloat(tok, bitSize) } - return pref.Value{}, false + return protoreflect.Value{}, false } -func getFloat(tok json.Token, bitSize int) (pref.Value, bool) { +func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { n, ok := tok.Float(bitSize) if !ok { - return pref.Value{}, false + return protoreflect.Value{}, false } if bitSize == 32 { - return pref.ValueOfFloat32(float32(n)), true + return protoreflect.ValueOfFloat32(float32(n)), true } - return pref.ValueOfFloat64(n), true + return protoreflect.ValueOfFloat64(n), true } -func unmarshalBytes(tok json.Token) (pref.Value, bool) { +func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { if tok.Kind() != json.String { - return pref.Value{}, false + return protoreflect.Value{}, false } s := tok.ParsedString() @@ -469,36 +469,36 @@ func unmarshalBytes(tok json.Token) (pref.Value, bool) { } b, err := enc.DecodeString(s) if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } - return pref.ValueOfBytes(b), true + return protoreflect.ValueOfBytes(b), true } -func unmarshalEnum(tok json.Token, fd pref.FieldDescriptor) (pref.Value, bool) { +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { switch tok.Kind() { case json.String: // Lookup EnumNumber based on name. s := tok.ParsedString() - if enumVal := fd.Enum().Values().ByName(pref.Name(s)); enumVal != nil { - return pref.ValueOfEnum(enumVal.Number()), true + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), true } case json.Number: if n, ok := tok.Int(32); ok { - return pref.ValueOfEnum(pref.EnumNumber(n)), true + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true } case json.Null: // This is only valid for google.protobuf.NullValue. if isNullValue(fd) { - return pref.ValueOfEnum(0), true + return protoreflect.ValueOfEnum(0), true } } - return pref.Value{}, false + return protoreflect.Value{}, false } -func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { +func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { tok, err := d.Read() if err != nil { return err @@ -508,7 +508,7 @@ func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { } switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: for { tok, err := d.Peek() if err != nil { @@ -549,7 +549,7 @@ func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { return nil } -func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { +func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { tok, err := d.Read() if err != nil { return err @@ -561,18 +561,18 @@ func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { // Determine ahead whether map entry is a scalar type or a message type in // order to call the appropriate unmarshalMapValue func inside the for loop // below. 
- var unmarshalMapValue func() (pref.Value, error) + var unmarshalMapValue func() (protoreflect.Value, error) switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: - unmarshalMapValue = func() (pref.Value, error) { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { val := mmap.NewValue() if err := d.unmarshalMessage(val.Message(), false); err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } return val, nil } default: - unmarshalMapValue = func() (pref.Value, error) { + unmarshalMapValue = func() (protoreflect.Value, error) { return d.unmarshalScalar(fd.MapValue()) } } @@ -618,7 +618,7 @@ Loop: // unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. // A map key type is any integral or string type. -func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref.MapKey, error) { +func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { const b32 = 32 const b64 = 64 const base10 = 10 @@ -626,40 +626,40 @@ func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref. name := tok.Name() kind := fd.Kind() switch kind { - case pref.StringKind: - return pref.ValueOfString(name).MapKey(), nil + case protoreflect.StringKind: + return protoreflect.ValueOfString(name).MapKey(), nil - case pref.BoolKind: + case protoreflect.BoolKind: switch name { case "true": - return pref.ValueOfBool(true).MapKey(), nil + return protoreflect.ValueOfBool(true).MapKey(), nil case "false": - return pref.ValueOfBool(false).MapKey(), nil + return protoreflect.ValueOfBool(false).MapKey(), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if n, err := strconv.ParseInt(name, base10, b32); err == nil { - return pref.ValueOfInt32(int32(n)).MapKey(), nil + return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if n, err := strconv.ParseInt(name, base10, b64); err == nil { - return pref.ValueOfInt64(int64(n)).MapKey(), nil + return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if n, err := strconv.ParseUint(name, base10, b32); err == nil { - return pref.ValueOfUint32(uint32(n)).MapKey(), nil + return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if n, err := strconv.ParseUint(name, base10, b64); err == nil { - return pref.ValueOfUint64(uint64(n)).MapKey(), nil + return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil } default: panic(fmt.Sprintf("invalid kind for map key: %v", kind)) } - return pref.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) + return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index ba971f07810c6..d09d22e139bce 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -18,7 +18,6 @@ import ( 
"google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -164,8 +163,8 @@ type typeURLFieldRanger struct { typeURL string } -func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { - if !f(typeFieldDesc, pref.ValueOfString(m.typeURL)) { +func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) { return } m.FieldRanger.Range(f) @@ -173,9 +172,9 @@ func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) // unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range // method to additionally iterate over unpopulated fields. -type unpopulatedFieldRanger struct{ pref.Message } +type unpopulatedFieldRanger struct{ protoreflect.Message } -func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { fds := m.Descriptor().Fields() for i := 0; i < fds.Len(); i++ { fd := fds.Get(i) @@ -184,10 +183,10 @@ func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) b } v := m.Get(fd) - isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil + isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() + isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil if isProto2Scalar || isSingularMessage { - v = pref.Value{} // use invalid value to emit null + v = protoreflect.Value{} // use invalid value to emit null } if !f(fd, v) { return @@ -199,7 +198,7 @@ func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) b // marshalMessage marshals the fields in the given protoreflect.Message. // If the typeURL is non-empty, then a synthetic "@type" field is injected // containing the URL as the value. -func (e encoder) marshalMessage(m pref.Message, typeURL string) error { +func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { return errors.New("no support for proto1 MessageSets") } @@ -220,7 +219,7 @@ func (e encoder) marshalMessage(m pref.Message, typeURL string) error { } var err error - order.RangeFields(fields, order.IndexNameFieldOrder, func(fd pref.FieldDescriptor, v pref.Value) bool { + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { name := fd.JSONName() if e.opts.UseProtoNames { name = fd.TextName() @@ -238,7 +237,7 @@ func (e encoder) marshalMessage(m pref.Message, typeURL string) error { } // marshalValue marshals the given protoreflect.Value. -func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { switch { case fd.IsList(): return e.marshalList(val.List(), fd) @@ -251,44 +250,44 @@ func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error { // marshalSingular marshals the given non-repeated field value. This includes // all scalar types, enums, messages, and groups. 
-func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { if !val.IsValid() { e.WriteNull() return nil } switch kind := fd.Kind(); kind { - case pref.BoolKind: + case protoreflect.BoolKind: e.WriteBool(val.Bool()) - case pref.StringKind: + case protoreflect.StringKind: if e.WriteString(val.String()) != nil { return errors.InvalidUTF8(string(fd.FullName())) } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: e.WriteInt(val.Int()) - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: e.WriteUint(val.Uint()) - case pref.Int64Kind, pref.Sint64Kind, pref.Uint64Kind, - pref.Sfixed64Kind, pref.Fixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind, + protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind: // 64-bit integers are written out as JSON string. e.WriteString(val.String()) - case pref.FloatKind: + case protoreflect.FloatKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 32) - case pref.DoubleKind: + case protoreflect.DoubleKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 64) - case pref.BytesKind: + case protoreflect.BytesKind: e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) - case pref.EnumKind: + case protoreflect.EnumKind: if fd.Enum().FullName() == genid.NullValue_enum_fullname { e.WriteNull() } else { @@ -300,7 +299,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: if err := e.marshalMessage(val.Message(), ""); err != nil { return err } @@ -312,7 +311,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } // marshalList marshals the given protoreflect.List. -func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error { +func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { e.StartArray() defer e.EndArray() @@ -326,12 +325,12 @@ func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error { } // marshalMap marshals given protoreflect.Map. 
-func (e encoder) marshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { +func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { e.StartObject() defer e.EndObject() var err error - order.RangeEntries(mmap, order.GenericKeyOrder, func(k pref.MapKey, v pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { if err = e.WriteName(k.String()); err != nil { return false } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 72924a9050cfb..c85f8469480a7 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -17,14 +17,14 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) -type marshalFunc func(encoder, pref.Message) error +type marshalFunc func(encoder, protoreflect.Message) error // wellKnownTypeMarshaler returns a marshal function if the message type // has specialized serialization behavior. It returns nil otherwise. -func wellKnownTypeMarshaler(name pref.FullName) marshalFunc { +func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc { if name.Parent() == genid.GoogleProtobuf_package { switch name.Name() { case genid.Any_message_name: @@ -58,11 +58,11 @@ func wellKnownTypeMarshaler(name pref.FullName) marshalFunc { return nil } -type unmarshalFunc func(decoder, pref.Message) error +type unmarshalFunc func(decoder, protoreflect.Message) error // wellKnownTypeUnmarshaler returns a unmarshal function if the message type // has specialized serialization behavior. It returns nil otherwise. -func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc { +func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc { if name.Parent() == genid.GoogleProtobuf_package { switch name.Name() { case genid.Any_message_name: @@ -102,7 +102,7 @@ func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc { // custom JSON representation, that representation will be embedded adding a // field `value` which holds the custom JSON in addition to the `@type` field. -func (e encoder) marshalAny(m pref.Message) error { +func (e encoder) marshalAny(m protoreflect.Message) error { fds := m.Descriptor().Fields() fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) fdValue := fds.ByNumber(genid.Any_Value_field_number) @@ -163,7 +163,7 @@ func (e encoder) marshalAny(m pref.Message) error { return nil } -func (d decoder) unmarshalAny(m pref.Message) error { +func (d decoder) unmarshalAny(m protoreflect.Message) error { // Peek to check for json.ObjectOpen to avoid advancing a read. start, err := d.Peek() if err != nil { @@ -233,8 +233,8 @@ func (d decoder) unmarshalAny(m pref.Message) error { fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) fdValue := fds.ByNumber(genid.Any_Value_field_number) - m.Set(fdType, pref.ValueOfString(typeURL)) - m.Set(fdValue, pref.ValueOfBytes(b)) + m.Set(fdType, protoreflect.ValueOfString(typeURL)) + m.Set(fdValue, protoreflect.ValueOfBytes(b)) return nil } @@ -354,7 +354,7 @@ func (d decoder) skipJSONValue() error { // unmarshalAnyValue unmarshals the given custom-type message from the JSON // object's "value" field. 
-func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) error { +func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error { // Skip ObjectOpen, and start reading the fields. d.Read() @@ -402,13 +402,13 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) erro // Wrapper types are encoded as JSON primitives like string, number or boolean. -func (e encoder) marshalWrapperType(m pref.Message) error { +func (e encoder) marshalWrapperType(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) val := m.Get(fd) return e.marshalSingular(val, fd) } -func (d decoder) unmarshalWrapperType(m pref.Message) error { +func (d decoder) unmarshalWrapperType(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) val, err := d.unmarshalScalar(fd) if err != nil { @@ -420,13 +420,13 @@ func (d decoder) unmarshalWrapperType(m pref.Message) error { // The JSON representation for Empty is an empty JSON object. -func (e encoder) marshalEmpty(pref.Message) error { +func (e encoder) marshalEmpty(protoreflect.Message) error { e.StartObject() e.EndObject() return nil } -func (d decoder) unmarshalEmpty(pref.Message) error { +func (d decoder) unmarshalEmpty(protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -462,12 +462,12 @@ func (d decoder) unmarshalEmpty(pref.Message) error { // The JSON representation for Struct is a JSON object that contains the encoded // Struct.fields map and follows the serialization rules for a map. -func (e encoder) marshalStruct(m pref.Message) error { +func (e encoder) marshalStruct(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) return e.marshalMap(m.Get(fd).Map(), fd) } -func (d decoder) unmarshalStruct(m pref.Message) error { +func (d decoder) unmarshalStruct(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) return d.unmarshalMap(m.Mutable(fd).Map(), fd) } @@ -476,12 +476,12 @@ func (d decoder) unmarshalStruct(m pref.Message) error { // ListValue.values repeated field and follows the serialization rules for a // repeated field. -func (e encoder) marshalListValue(m pref.Message) error { +func (e encoder) marshalListValue(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) return e.marshalList(m.Get(fd).List(), fd) } -func (d decoder) unmarshalListValue(m pref.Message) error { +func (d decoder) unmarshalListValue(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) return d.unmarshalList(m.Mutable(fd).List(), fd) } @@ -490,7 +490,7 @@ func (d decoder) unmarshalListValue(m pref.Message) error { // set. Each of the field in the oneof has its own custom serialization rule. A // Value message needs to be a oneof field set, else it is an error. 
-func (e encoder) marshalKnownValue(m pref.Message) error { +func (e encoder) marshalKnownValue(m protoreflect.Message) error { od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) fd := m.WhichOneof(od) if fd == nil { @@ -504,19 +504,19 @@ func (e encoder) marshalKnownValue(m pref.Message) error { return e.marshalSingular(m.Get(fd), fd) } -func (d decoder) unmarshalKnownValue(m pref.Message) error { +func (d decoder) unmarshalKnownValue(m protoreflect.Message) error { tok, err := d.Peek() if err != nil { return err } - var fd pref.FieldDescriptor - var val pref.Value + var fd protoreflect.FieldDescriptor + var val protoreflect.Value switch tok.Kind() { case json.Null: d.Read() fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) - val = pref.ValueOfEnum(0) + val = protoreflect.ValueOfEnum(0) case json.Bool: tok, err := d.Read() @@ -524,7 +524,7 @@ func (d decoder) unmarshalKnownValue(m pref.Message) error { return err } fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) - val = pref.ValueOfBool(tok.Bool()) + val = protoreflect.ValueOfBool(tok.Bool()) case json.Number: tok, err := d.Read() @@ -550,7 +550,7 @@ func (d decoder) unmarshalKnownValue(m pref.Message) error { return err } fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) - val = pref.ValueOfString(tok.ParsedString()) + val = protoreflect.ValueOfString(tok.ParsedString()) case json.ObjectOpen: fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) @@ -591,7 +591,7 @@ const ( maxSecondsInDuration = 315576000000 ) -func (e encoder) marshalDuration(m pref.Message) error { +func (e encoder) marshalDuration(m protoreflect.Message) error { fds := m.Descriptor().Fields() fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) @@ -623,7 +623,7 @@ func (e encoder) marshalDuration(m pref.Message) error { return nil } -func (d decoder) unmarshalDuration(m pref.Message) error { +func (d decoder) unmarshalDuration(m protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -646,8 +646,8 @@ func (d decoder) unmarshalDuration(m pref.Message) error { fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) - m.Set(fdSeconds, pref.ValueOfInt64(secs)) - m.Set(fdNanos, pref.ValueOfInt32(nanos)) + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) return nil } @@ -779,7 +779,7 @@ const ( minTimestampSeconds = -62135596800 ) -func (e encoder) marshalTimestamp(m pref.Message) error { +func (e encoder) marshalTimestamp(m protoreflect.Message) error { fds := m.Descriptor().Fields() fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) @@ -805,7 +805,7 @@ func (e encoder) marshalTimestamp(m pref.Message) error { return nil } -func (d decoder) unmarshalTimestamp(m pref.Message) error { +func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -829,8 +829,8 @@ func (d decoder) unmarshalTimestamp(m pref.Message) error { fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) - m.Set(fdSeconds, pref.ValueOfInt64(secs)) - m.Set(fdNanos, pref.ValueOfInt32(int32(t.Nanosecond()))) + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, 
protoreflect.ValueOfInt32(int32(t.Nanosecond()))) return nil } @@ -839,14 +839,14 @@ func (d decoder) unmarshalTimestamp(m pref.Message) error { // lower-camel naming conventions. Encoding should fail if the path name would // end up differently after a round-trip. -func (e encoder) marshalFieldMask(m pref.Message) error { +func (e encoder) marshalFieldMask(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) list := m.Get(fd).List() paths := make([]string, 0, list.Len()) for i := 0; i < list.Len(); i++ { s := list.Get(i).String() - if !pref.FullName(s).IsValid() { + if !protoreflect.FullName(s).IsValid() { return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) } // Return error if conversion to camelCase is not reversible. @@ -861,7 +861,7 @@ func (e encoder) marshalFieldMask(m pref.Message) error { return nil } -func (d decoder) unmarshalFieldMask(m pref.Message) error { +func (d decoder) unmarshalFieldMask(m protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -880,10 +880,10 @@ func (d decoder) unmarshalFieldMask(m pref.Message) error { for _, s0 := range paths { s := strs.JSONSnakeCase(s0) - if strings.Contains(s0, "_") || !pref.FullName(s).IsValid() { + if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() { return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) } - list.Append(pref.ValueOfString(s)) + list.Append(protoreflect.ValueOfString(s)) } return nil } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 179d6e8fc1ce0..4921b2d4a76f1 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -17,7 +17,7 @@ import ( "google.golang.org/protobuf/internal/set" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -103,7 +103,7 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { } // unmarshalMessage unmarshals into the given protoreflect.Message. -func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { +func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) error { messageDesc := m.Descriptor() if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { return errors.New("no support for proto1 MessageSets") @@ -150,24 +150,24 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { } // Resolve the field descriptor. - var name pref.Name - var fd pref.FieldDescriptor - var xt pref.ExtensionType + var name protoreflect.Name + var fd protoreflect.FieldDescriptor + var xt protoreflect.ExtensionType var xtErr error var isFieldNumberName bool switch tok.NameKind() { case text.IdentName: - name = pref.Name(tok.IdentName()) + name = protoreflect.Name(tok.IdentName()) fd = fieldDescs.ByTextName(string(name)) case text.TypeName: // Handle extensions only. This code path is not for Any. 
- xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) + xt, xtErr = d.opts.Resolver.FindExtensionByName(protoreflect.FullName(tok.TypeName())) case text.FieldNumber: isFieldNumberName = true - num := pref.FieldNumber(tok.FieldNumber()) + num := protoreflect.FieldNumber(tok.FieldNumber()) if !num.IsValid() { return d.newError(tok.Pos(), "invalid field number: %d", num) } @@ -215,7 +215,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { switch { case fd.IsList(): kind := fd.Kind() - if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -232,7 +232,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { default: kind := fd.Kind() - if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -262,11 +262,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { // unmarshalSingular unmarshals a non-repeated field value specified by the // given FieldDescriptor. -func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { - var val pref.Value +func (d decoder) unmarshalSingular(fd protoreflect.FieldDescriptor, m protoreflect.Message) error { + var val protoreflect.Value var err error switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: val = m.NewField(fd) err = d.unmarshalMessage(val.Message(), true) default: @@ -280,94 +280,94 @@ func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) erro // unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the // given FieldDescriptor. 
-func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { +func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { tok, err := d.Read() if err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } if tok.Kind() != text.Scalar { - return pref.Value{}, d.unexpectedTokenError(tok) + return protoreflect.Value{}, d.unexpectedTokenError(tok) } kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: if b, ok := tok.Bool(); ok { - return pref.ValueOfBool(b), nil + return protoreflect.ValueOfBool(b), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if n, ok := tok.Int32(); ok { - return pref.ValueOfInt32(n), nil + return protoreflect.ValueOfInt32(n), nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if n, ok := tok.Int64(); ok { - return pref.ValueOfInt64(n), nil + return protoreflect.ValueOfInt64(n), nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if n, ok := tok.Uint32(); ok { - return pref.ValueOfUint32(n), nil + return protoreflect.ValueOfUint32(n), nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if n, ok := tok.Uint64(); ok { - return pref.ValueOfUint64(n), nil + return protoreflect.ValueOfUint64(n), nil } - case pref.FloatKind: + case protoreflect.FloatKind: if n, ok := tok.Float32(); ok { - return pref.ValueOfFloat32(n), nil + return protoreflect.ValueOfFloat32(n), nil } - case pref.DoubleKind: + case protoreflect.DoubleKind: if n, ok := tok.Float64(); ok { - return pref.ValueOfFloat64(n), nil + return protoreflect.ValueOfFloat64(n), nil } - case pref.StringKind: + case protoreflect.StringKind: if s, ok := tok.String(); ok { if strs.EnforceUTF8(fd) && !utf8.ValidString(s) { - return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") + return protoreflect.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") } - return pref.ValueOfString(s), nil + return protoreflect.ValueOfString(s), nil } - case pref.BytesKind: + case protoreflect.BytesKind: if b, ok := tok.String(); ok { - return pref.ValueOfBytes([]byte(b)), nil + return protoreflect.ValueOfBytes([]byte(b)), nil } - case pref.EnumKind: + case protoreflect.EnumKind: if lit, ok := tok.Enum(); ok { // Lookup EnumNumber based on name. - if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil { - return pref.ValueOfEnum(enumVal.Number()), nil + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(lit)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), nil } } if num, ok := tok.Int32(); ok { - return pref.ValueOfEnum(pref.EnumNumber(num)), nil + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(num)), nil } default: panic(fmt.Sprintf("invalid scalar kind %v", kind)) } - return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) } // unmarshalList unmarshals into given protoreflect.List. A list value can // either be in [] syntax or simply just a single scalar/message value. 
-func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { +func (d decoder) unmarshalList(fd protoreflect.FieldDescriptor, list protoreflect.List) error { tok, err := d.Peek() if err != nil { return err } switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: switch tok.Kind() { case text.ListOpen: d.Read() @@ -441,22 +441,22 @@ func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { // unmarshalMap unmarshals into given protoreflect.Map. A map value is a // textproto message containing {key: , value: }. -func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { +func (d decoder) unmarshalMap(fd protoreflect.FieldDescriptor, mmap protoreflect.Map) error { // Determine ahead whether map entry is a scalar type or a message type in // order to call the appropriate unmarshalMapValue func inside // unmarshalMapEntry. - var unmarshalMapValue func() (pref.Value, error) + var unmarshalMapValue func() (protoreflect.Value, error) switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: - unmarshalMapValue = func() (pref.Value, error) { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { pval := mmap.NewValue() if err := d.unmarshalMessage(pval.Message(), true); err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } return pval, nil } default: - unmarshalMapValue = func() (pref.Value, error) { + unmarshalMapValue = func() (protoreflect.Value, error) { return d.unmarshalScalar(fd.MapValue()) } } @@ -494,9 +494,9 @@ func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { // unmarshalMap unmarshals into given protoreflect.Map. A map value is a // textproto message containing {key: , value: }. -func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error { - var key pref.MapKey - var pval pref.Value +func (d decoder) unmarshalMapEntry(fd protoreflect.FieldDescriptor, mmap protoreflect.Map, unmarshalMapValue func() (protoreflect.Value, error)) error { + var key protoreflect.MapKey + var pval protoreflect.Value Loop: for { // Read field name. @@ -520,7 +520,7 @@ Loop: return d.unexpectedTokenError(tok) } - switch name := pref.Name(tok.IdentName()); name { + switch name := protoreflect.Name(tok.IdentName()); name { case genid.MapEntry_Key_field_name: if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") @@ -535,7 +535,7 @@ Loop: key = val.MapKey() case genid.MapEntry_Value_field_name: - if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { + if kind := fd.MapValue().Kind(); (kind != protoreflect.MessageKind) && (kind != protoreflect.GroupKind) { if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -561,7 +561,7 @@ Loop: } if !pval.IsValid() { switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: // If value field is not set for message/group types, construct an // empty one as default. pval = mmap.NewValue() @@ -575,7 +575,7 @@ Loop: // unmarshalAny unmarshals an Any textproto. It can either be in expanded form // or non-expanded form. 
-func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { +func (d decoder) unmarshalAny(m protoreflect.Message, checkDelims bool) error { var typeURL string var bValue []byte var seenTypeUrl bool @@ -619,7 +619,7 @@ Loop: return d.syntaxError(tok.Pos(), "missing field separator :") } - switch name := pref.Name(tok.IdentName()); name { + switch name := protoreflect.Name(tok.IdentName()); name { case genid.Any_TypeUrl_field_name: if seenTypeUrl { return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) @@ -686,10 +686,10 @@ Loop: fds := m.Descriptor().Fields() if len(typeURL) > 0 { - m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), protoreflect.ValueOfString(typeURL)) } if len(bValue) > 0 { - m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) + m.Set(fds.ByNumber(genid.Any_Value_field_number), protoreflect.ValueOfBytes(bValue)) } return nil } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 8d5304dc5b320..ebf6c65284ddf 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -20,7 +20,6 @@ import ( "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -150,7 +149,7 @@ type encoder struct { } // marshalMessage marshals the given protoreflect.Message. -func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { +func (e encoder) marshalMessage(m protoreflect.Message, inclDelims bool) error { messageDesc := m.Descriptor() if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { return errors.New("no support for proto1 MessageSets") @@ -190,7 +189,7 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { } // marshalField marshals the given field with protoreflect.Value. -func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalField(name string, val protoreflect.Value, fd protoreflect.FieldDescriptor) error { switch { case fd.IsList(): return e.marshalList(name, val.List(), fd) @@ -204,40 +203,40 @@ func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescript // marshalSingular marshals the given non-repeated field value. This includes // all scalar types, enums, messages, and groups. 
-func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: e.WriteBool(val.Bool()) - case pref.StringKind: + case protoreflect.StringKind: s := val.String() if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) { return errors.InvalidUTF8(string(fd.FullName())) } e.WriteString(s) - case pref.Int32Kind, pref.Int64Kind, - pref.Sint32Kind, pref.Sint64Kind, - pref.Sfixed32Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Int64Kind, + protoreflect.Sint32Kind, protoreflect.Sint64Kind, + protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: e.WriteInt(val.Int()) - case pref.Uint32Kind, pref.Uint64Kind, - pref.Fixed32Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, + protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: e.WriteUint(val.Uint()) - case pref.FloatKind: + case protoreflect.FloatKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 32) - case pref.DoubleKind: + case protoreflect.DoubleKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 64) - case pref.BytesKind: + case protoreflect.BytesKind: e.WriteString(string(val.Bytes())) - case pref.EnumKind: + case protoreflect.EnumKind: num := val.Enum() if desc := fd.Enum().Values().ByNumber(num); desc != nil { e.WriteLiteral(string(desc.Name())) @@ -246,7 +245,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error e.WriteInt(int64(num)) } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return e.marshalMessage(val.Message(), true) default: @@ -256,7 +255,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } // marshalList marshals the given protoreflect.List as multiple name-value fields. -func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error { +func (e encoder) marshalList(name string, list protoreflect.List, fd protoreflect.FieldDescriptor) error { size := list.Len() for i := 0; i < size; i++ { e.WriteName(name) @@ -268,9 +267,9 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto } // marshalMap marshals the given protoreflect.Map as multiple name-value fields. -func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { +func (e encoder) marshalMap(name string, mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { var err error - order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(key protoreflect.MapKey, val protoreflect.Value) bool { e.WriteName(name) e.StartMessage() defer e.EndMessage() @@ -334,7 +333,7 @@ func (e encoder) marshalUnknown(b []byte) { // marshalAny marshals the given google.protobuf.Any message in expanded form. // It returns true if it was able to marshal, else false. -func (e encoder) marshalAny(any pref.Message) bool { +func (e encoder) marshalAny(any protoreflect.Message) bool { // Construct the embedded message. 
fds := any.Descriptor().Fields() fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index 9c61112f58d13..ce57f57ebd486 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -516,6 +516,7 @@ func EncodeTag(num Number, typ Type) uint64 { } // DecodeZigZag decodes a zig-zag-encoded uint64 as an int64. +// // Input: {…, 5, 3, 1, 0, 2, 4, 6, …} // Output: {…, -3, -2, -1, 0, +1, +2, +3, …} func DecodeZigZag(x uint64) int64 { @@ -523,6 +524,7 @@ func DecodeZigZag(x uint64) int64 { } // EncodeZigZag encodes an int64 as a zig-zag-encoded uint64. +// // Input: {…, -3, -2, -1, 0, +1, +2, +3, …} // Output: {…, 5, 3, 1, 0, 2, 4, 6, …} func EncodeZigZag(x int64) uint64 { @@ -530,6 +532,7 @@ func EncodeZigZag(x int64) uint64 { } // DecodeBool decodes a uint64 as a bool. +// // Input: { 0, 1, 2, …} // Output: {false, true, true, …} func DecodeBool(x uint64) bool { @@ -537,6 +540,7 @@ func DecodeBool(x uint64) bool { } // EncodeBool encodes a bool as a uint64. +// // Input: {false, true} // Output: { 0, 1} func EncodeBool(x bool) uint64 { diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index 360c63329d4dc..db5248e1b512b 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type list interface { @@ -30,17 +30,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { if isRoot { var name string switch vs.(type) { - case pref.Names: + case protoreflect.Names: name = "Names" - case pref.FieldNumbers: + case protoreflect.FieldNumbers: name = "FieldNumbers" - case pref.FieldRanges: + case protoreflect.FieldRanges: name = "FieldRanges" - case pref.EnumRanges: + case protoreflect.EnumRanges: name = "EnumRanges" - case pref.FileImports: + case protoreflect.FileImports: name = "FileImports" - case pref.Descriptor: + case protoreflect.Descriptor: name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" default: name = reflect.ValueOf(vs).Elem().Type().Name() @@ -50,17 +50,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { var ss []string switch vs := vs.(type) { - case pref.Names: + case protoreflect.Names: for i := 0; i < vs.Len(); i++ { ss = append(ss, fmt.Sprint(vs.Get(i))) } return start + joinStrings(ss, false) + end - case pref.FieldNumbers: + case protoreflect.FieldNumbers: for i := 0; i < vs.Len(); i++ { ss = append(ss, fmt.Sprint(vs.Get(i))) } return start + joinStrings(ss, false) + end - case pref.FieldRanges: + case protoreflect.FieldRanges: for i := 0; i < vs.Len(); i++ { r := vs.Get(i) if r[0]+1 == r[1] { @@ -70,7 +70,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } } return start + joinStrings(ss, false) + end - case pref.EnumRanges: + case protoreflect.EnumRanges: for i := 0; i < vs.Len(); i++ { r := vs.Get(i) if r[0] == r[1] { @@ -80,7 +80,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } } return start + joinStrings(ss, false) + end - case pref.FileImports: + case 
protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") @@ -88,11 +88,11 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } return start + joinStrings(ss, allowMulti) + end default: - _, isEnumValue := vs.(pref.EnumValueDescriptors) + _, isEnumValue := vs.(protoreflect.EnumValueDescriptors) for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } @@ -106,20 +106,20 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { // // Using a list allows us to print the accessors in a sensible order. var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, + reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, + reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, + reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, + reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt + reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, + reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, + reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, + reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, } -func FormatDesc(s fmt.State, r rune, t pref.Descriptor) { +func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) } -func formatDescOpt(t 
pref.Descriptor, isRoot, allowMulti bool) string { +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -128,7 +128,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { start = rt.Name() + "{" } - _, isFile := t.(pref.FileDescriptor) + _, isFile := t.(protoreflect.FileDescriptor) rs := records{allowMulti: allowMulti} if t.IsPlaceholder() { if isFile { @@ -146,7 +146,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { rs.Append(rv, "Name") } switch t := t.(type) { - case pref.FieldDescriptor: + case protoreflect.FieldDescriptor: for _, s := range descriptorAccessors[rt] { switch s { case "MapKey": @@ -156,9 +156,9 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { case "MapValue": if v := t.MapValue(); v != nil { switch v.Kind() { - case pref.EnumKind: + case protoreflect.EnumKind: rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) default: rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) @@ -180,7 +180,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { rs.Append(rv, s) } } - case pref.OneofDescriptor: + case protoreflect.OneofDescriptor: var ss []string fs := t.Fields() for i := 0; i < fs.Len(); i++ { @@ -216,7 +216,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { if !rv.IsValid() { panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) } - if _, ok := rv.Interface().(pref.Value); ok { + if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] if !rv.IsNil() { rv = rv.Elem() @@ -250,9 +250,9 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { switch v := v.(type) { case list: s = formatListOpt(v, false, rs.allowMulti) - case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor: - s = string(v.(pref.Descriptor).Name()) - case pref.Descriptor: + case protoreflect.FieldDescriptor, protoreflect.OneofDescriptor, protoreflect.EnumValueDescriptor, protoreflect.MethodDescriptor: + s = string(v.(protoreflect.Descriptor).Name()) + case protoreflect.Descriptor: s = string(v.FullName()) case string: s = strconv.Quote(v) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go index fdd9b13f2fcff..328dc733b042d 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go @@ -15,8 +15,8 @@ import ( "strconv" ptext "google.golang.org/protobuf/internal/encoding/text" - errors "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" ) // Format is the serialization format used to represent the default value. @@ -35,56 +35,56 @@ const ( // Unmarshal deserializes the default string s according to the given kind k. // When k is an enum, a list of enum value descriptors must be provided. 
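The Format argument distinguishes the two places a default can come from: Go struct tags (GoTag) and descriptor default_value strings (Descriptor). As a hedged illustration with an invented message, a proto2 bool default of true is spelled def=1 in the struct tag but "true" in the descriptor, which is exactly the split the switch below handles:

package example

// Msg is an invented proto2-style message, shown only to illustrate the two
// default formats: the GoTag spelling below (def=1) versus the descriptor
// spelling default_value: "true" for the same field.
type Msg struct {
	Flag *bool `protobuf:"varint,1,opt,name=flag,def=1"`
}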
-func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) { +func Unmarshal(s string, k protoreflect.Kind, evs protoreflect.EnumValueDescriptors, f Format) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { switch k { - case pref.BoolKind: + case protoreflect.BoolKind: if f == GoTag { switch s { case "1": - return pref.ValueOfBool(true), nil, nil + return protoreflect.ValueOfBool(true), nil, nil case "0": - return pref.ValueOfBool(false), nil, nil + return protoreflect.ValueOfBool(false), nil, nil } } else { switch s { case "true": - return pref.ValueOfBool(true), nil, nil + return protoreflect.ValueOfBool(true), nil, nil case "false": - return pref.ValueOfBool(false), nil, nil + return protoreflect.ValueOfBool(false), nil, nil } } - case pref.EnumKind: + case protoreflect.EnumKind: if f == GoTag { // Go tags use the numeric form of the enum value. if n, err := strconv.ParseInt(s, 10, 32); err == nil { - if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil { - return pref.ValueOfEnum(ev.Number()), ev, nil + if ev := evs.ByNumber(protoreflect.EnumNumber(n)); ev != nil { + return protoreflect.ValueOfEnum(ev.Number()), ev, nil } } } else { // Descriptor default_value use the enum identifier. - ev := evs.ByName(pref.Name(s)) + ev := evs.ByName(protoreflect.Name(s)) if ev != nil { - return pref.ValueOfEnum(ev.Number()), ev, nil + return protoreflect.ValueOfEnum(ev.Number()), ev, nil } } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if v, err := strconv.ParseInt(s, 10, 32); err == nil { - return pref.ValueOfInt32(int32(v)), nil, nil + return protoreflect.ValueOfInt32(int32(v)), nil, nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if v, err := strconv.ParseInt(s, 10, 64); err == nil { - return pref.ValueOfInt64(int64(v)), nil, nil + return protoreflect.ValueOfInt64(int64(v)), nil, nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if v, err := strconv.ParseUint(s, 10, 32); err == nil { - return pref.ValueOfUint32(uint32(v)), nil, nil + return protoreflect.ValueOfUint32(uint32(v)), nil, nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if v, err := strconv.ParseUint(s, 10, 64); err == nil { - return pref.ValueOfUint64(uint64(v)), nil, nil + return protoreflect.ValueOfUint64(uint64(v)), nil, nil } - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: var v float64 var err error switch s { @@ -98,29 +98,29 @@ func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) ( v, err = strconv.ParseFloat(s, 64) } if err == nil { - if k == pref.FloatKind { - return pref.ValueOfFloat32(float32(v)), nil, nil + if k == protoreflect.FloatKind { + return protoreflect.ValueOfFloat32(float32(v)), nil, nil } else { - return pref.ValueOfFloat64(float64(v)), nil, nil + return protoreflect.ValueOfFloat64(float64(v)), nil, nil } } - case pref.StringKind: + case protoreflect.StringKind: // String values are already unescaped and can be used as is. 
- return pref.ValueOfString(s), nil, nil - case pref.BytesKind: + return protoreflect.ValueOfString(s), nil, nil + case protoreflect.BytesKind: if b, ok := unmarshalBytes(s); ok { - return pref.ValueOfBytes(b), nil, nil + return protoreflect.ValueOfBytes(b), nil, nil } } - return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) + return protoreflect.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) } // Marshal serializes v as the default string according to the given kind k. // When specifying the Descriptor format for an enum kind, the associated // enum value descriptor must be provided. -func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) { +func Marshal(v protoreflect.Value, ev protoreflect.EnumValueDescriptor, k protoreflect.Kind, f Format) (string, error) { switch k { - case pref.BoolKind: + case protoreflect.BoolKind: if f == GoTag { if v.Bool() { return "1", nil @@ -134,17 +134,17 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) ( return "false", nil } } - case pref.EnumKind: + case protoreflect.EnumKind: if f == GoTag { return strconv.FormatInt(int64(v.Enum()), 10), nil } else { return string(ev.Name()), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: return strconv.FormatInt(v.Int(), 10), nil - case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: return strconv.FormatUint(v.Uint(), 10), nil - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: f := v.Float() switch { case math.IsInf(f, -1): @@ -154,16 +154,16 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) ( case math.IsNaN(f): return "nan", nil default: - if k == pref.FloatKind { + if k == protoreflect.FloatKind { return strconv.FormatFloat(f, 'g', -1, 32), nil } else { return strconv.FormatFloat(f, 'g', -1, 64), nil } } - case pref.StringKind: + case protoreflect.StringKind: // String values are serialized as is without any escaping. return v.String(), nil - case pref.BytesKind: + case protoreflect.BytesKind: if s, ok := marshalBytes(v.Bytes()); ok { return s, nil } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go index c1866f3c1a78c..a6693f0a2f39c 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // The MessageSet wire format is equivalent to a message defined as follows, @@ -33,6 +33,7 @@ const ( // ExtensionName is the field name for extensions of MessageSet. 
// // A valid MessageSet extension must be of the form: +// // message MyMessage { // extend proto2.bridge.MessageSet { // optional MyMessage message_set_extension = 1234; @@ -42,13 +43,13 @@ const ( const ExtensionName = "message_set_extension" // IsMessageSet returns whether the message uses the MessageSet wire format. -func IsMessageSet(md pref.MessageDescriptor) bool { +func IsMessageSet(md protoreflect.MessageDescriptor) bool { xmd, ok := md.(interface{ IsMessageSet() bool }) return ok && xmd.IsMessageSet() } // IsMessageSetExtension reports this field properly extends a MessageSet. -func IsMessageSetExtension(fd pref.FieldDescriptor) bool { +func IsMessageSetExtension(fd protoreflect.FieldDescriptor) bool { switch { case fd.Name() != ExtensionName: return false diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 38f1931c6fd1a..373d208374f80 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -11,10 +11,10 @@ import ( "strconv" "strings" - defval "google.golang.org/protobuf/internal/encoding/defval" - fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) var byteType = reflect.TypeOf(byte(0)) @@ -29,9 +29,9 @@ var byteType = reflect.TypeOf(byte(0)) // This does not populate the Enum or Message (except for weak message). // // This function is a best effort attempt; parsing errors are ignored. -func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor { - f := new(fdesc.Field) - f.L0.ParentFile = fdesc.SurrogateProto2 +func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { + f := new(filedesc.Field) + f.L0.ParentFile = filedesc.SurrogateProto2 for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -39,68 +39,68 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p } switch s := tag[:i]; { case strings.HasPrefix(s, "name="): - f.L0.FullName = pref.FullName(s[len("name="):]) + f.L0.FullName = protoreflect.FullName(s[len("name="):]) case strings.Trim(s, "0123456789") == "": n, _ := strconv.ParseUint(s, 10, 32) - f.L1.Number = pref.FieldNumber(n) + f.L1.Number = protoreflect.FieldNumber(n) case s == "opt": - f.L1.Cardinality = pref.Optional + f.L1.Cardinality = protoreflect.Optional case s == "req": - f.L1.Cardinality = pref.Required + f.L1.Cardinality = protoreflect.Required case s == "rep": - f.L1.Cardinality = pref.Repeated + f.L1.Cardinality = protoreflect.Repeated case s == "varint": switch goType.Kind() { case reflect.Bool: - f.L1.Kind = pref.BoolKind + f.L1.Kind = protoreflect.BoolKind case reflect.Int32: - f.L1.Kind = pref.Int32Kind + f.L1.Kind = protoreflect.Int32Kind case reflect.Int64: - f.L1.Kind = pref.Int64Kind + f.L1.Kind = protoreflect.Int64Kind case reflect.Uint32: - f.L1.Kind = pref.Uint32Kind + f.L1.Kind = protoreflect.Uint32Kind case reflect.Uint64: - f.L1.Kind = pref.Uint64Kind + f.L1.Kind = protoreflect.Uint64Kind } case s == "zigzag32": if goType.Kind() == reflect.Int32 { - f.L1.Kind = pref.Sint32Kind + f.L1.Kind = protoreflect.Sint32Kind } case s == "zigzag64": if goType.Kind() == 
reflect.Int64 { - f.L1.Kind = pref.Sint64Kind + f.L1.Kind = protoreflect.Sint64Kind } case s == "fixed32": switch goType.Kind() { case reflect.Int32: - f.L1.Kind = pref.Sfixed32Kind + f.L1.Kind = protoreflect.Sfixed32Kind case reflect.Uint32: - f.L1.Kind = pref.Fixed32Kind + f.L1.Kind = protoreflect.Fixed32Kind case reflect.Float32: - f.L1.Kind = pref.FloatKind + f.L1.Kind = protoreflect.FloatKind } case s == "fixed64": switch goType.Kind() { case reflect.Int64: - f.L1.Kind = pref.Sfixed64Kind + f.L1.Kind = protoreflect.Sfixed64Kind case reflect.Uint64: - f.L1.Kind = pref.Fixed64Kind + f.L1.Kind = protoreflect.Fixed64Kind case reflect.Float64: - f.L1.Kind = pref.DoubleKind + f.L1.Kind = protoreflect.DoubleKind } case s == "bytes": switch { case goType.Kind() == reflect.String: - f.L1.Kind = pref.StringKind + f.L1.Kind = protoreflect.StringKind case goType.Kind() == reflect.Slice && goType.Elem() == byteType: - f.L1.Kind = pref.BytesKind + f.L1.Kind = protoreflect.BytesKind default: - f.L1.Kind = pref.MessageKind + f.L1.Kind = protoreflect.MessageKind } case s == "group": - f.L1.Kind = pref.GroupKind + f.L1.Kind = protoreflect.GroupKind case strings.HasPrefix(s, "enum="): - f.L1.Kind = pref.EnumKind + f.L1.Kind = protoreflect.EnumKind case strings.HasPrefix(s, "json="): jsonName := s[len("json="):] if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { @@ -111,23 +111,23 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p f.L1.IsPacked = true case strings.HasPrefix(s, "weak="): f.L1.IsWeak = true - f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):])) + f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. s, i = tag[len("def="):], len(tag) v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag) - f.L1.Default = fdesc.DefaultValue(v, ev) + f.L1.Default = filedesc.DefaultValue(v, ev) case s == "proto3": - f.L0.ParentFile = fdesc.SurrogateProto3 + f.L0.ParentFile = filedesc.SurrogateProto3 } tag = strings.TrimPrefix(tag[i:], ",") } // The generator uses the group message name instead of the field name. // We obtain the real field name by lowercasing the group name. - if f.L1.Kind == pref.GroupKind { - f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName))) + if f.L1.Kind == protoreflect.GroupKind { + f.L0.FullName = protoreflect.FullName(strings.ToLower(string(f.L0.FullName))) } return f } @@ -140,38 +140,38 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p // Depending on the context on how Marshal is called, there are different ways // through which that information is determined. As such it is the caller's // responsibility to provide a function to obtain that information. 
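For reference, this is the shape of tag string that Unmarshal above parses and Marshal below reassembles. The example message and field names are invented; the comma-separated tokens are wire/kind, field number, cardinality, and then options:

package example

// Msg is an invented proto3-style message; the protobuf struct tag is the
// format under discussion:
//   "bytes"        wire/kind token (string, bytes and message fields share it)
//   "2"            field number
//   "rep"          cardinality (repeated)
//   name=, proto3  trailing options
type Msg struct {
	Labels []string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"`
}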
-func Marshal(fd pref.FieldDescriptor, enumName string) string { +func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { var tag []string switch fd.Kind() { - case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind: + case protoreflect.BoolKind, protoreflect.EnumKind, protoreflect.Int32Kind, protoreflect.Uint32Kind, protoreflect.Int64Kind, protoreflect.Uint64Kind: tag = append(tag, "varint") - case pref.Sint32Kind: + case protoreflect.Sint32Kind: tag = append(tag, "zigzag32") - case pref.Sint64Kind: + case protoreflect.Sint64Kind: tag = append(tag, "zigzag64") - case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind: + case protoreflect.Sfixed32Kind, protoreflect.Fixed32Kind, protoreflect.FloatKind: tag = append(tag, "fixed32") - case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind: + case protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind, protoreflect.DoubleKind: tag = append(tag, "fixed64") - case pref.StringKind, pref.BytesKind, pref.MessageKind: + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind: tag = append(tag, "bytes") - case pref.GroupKind: + case protoreflect.GroupKind: tag = append(tag, "group") } tag = append(tag, strconv.Itoa(int(fd.Number()))) switch fd.Cardinality() { - case pref.Optional: + case protoreflect.Optional: tag = append(tag, "opt") - case pref.Required: + case protoreflect.Required: tag = append(tag, "req") - case pref.Repeated: + case protoreflect.Repeated: tag = append(tag, "rep") } if fd.IsPacked() { tag = append(tag, "packed") } name := string(fd.Name()) - if fd.Kind() == pref.GroupKind { + if fd.Kind() == protoreflect.GroupKind { // The name of the FieldDescriptor for a group field is // lowercased. To find the original capitalization, we // look in the field's MessageType. @@ -189,10 +189,10 @@ func Marshal(fd pref.FieldDescriptor, enumName string) string { // The previous implementation does not tag extension fields as proto3, // even when the field is defined in a proto3 file. Match that behavior // for consistency. - if fd.Syntax() == pref.Proto3 && !fd.IsExtension() { + if fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension() { tag = append(tag, "proto3") } - if fd.Kind() == pref.EnumKind && enumName != "" { + if fd.Kind() == protoreflect.EnumKind && enumName != "" { tag = append(tag, "enum="+enumName) } if fd.ContainingOneof() != nil { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 37803773fa390..427c62d037fca 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -8,7 +8,6 @@ import ( "bytes" "fmt" "io" - "regexp" "strconv" "unicode/utf8" @@ -421,7 +420,7 @@ func (d *Decoder) parseFieldName() (tok Token, err error) { return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) } - return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in)) + return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in)) } // parseTypeName parses Any type URL or extension field name. The name is @@ -571,7 +570,7 @@ func (d *Decoder) parseScalar() (Token, error) { return tok, nil } - return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in)) + return Token{}, d.newSyntaxError("invalid scalar value: %s", errId(d.in)) } // parseLiteralValue parses a literal value. 
A literal value is used for @@ -653,8 +652,29 @@ func consume(b []byte, n int) []byte { return b } -// Any sequence that looks like a non-delimiter (for error reporting). -var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`) +// errId extracts a byte sequence that looks like an invalid ID +// (for the purposes of error reporting). +func errId(seq []byte) []byte { + const maxLen = 32 + for i := 0; i < len(seq); { + if i > maxLen { + return append(seq[:i:i], "…"...) + } + r, size := utf8.DecodeRune(seq[i:]) + if r > utf8.RuneSelf || (r != '/' && isDelim(byte(r))) { + if i == 0 { + // Either the first byte is invalid UTF-8 or a + // delimiter, or the first rune is non-ASCII. + // Return it as-is. + i = size + } + return seq[:i:i] + } + i += size + } + // No delimiter found. + return seq +} // isDelim returns true if given byte is a delimiter character. func isDelim(c byte) bool { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go index f2d90b78999f3..81a5d8c861390 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -50,8 +50,10 @@ type number struct { // parseNumber constructs a number object from given input. It allows for the // following patterns: -// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) -// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// +// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) +// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// // It also returns the number of parsed bytes for the given number, 0 if it is // not a number. func parseNumber(input []byte) number { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go index 0ce8d6fb83d9e..7ae6c2a3c26d3 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go @@ -24,6 +24,6 @@ // the Go implementation should as well. // // The text format is almost a superset of JSON except: -// * message keys are not quoted strings, but identifiers -// * the top-level value must be a message without the delimiters +// - message keys are not quoted strings, but identifiers +// - the top-level value must be a message without the delimiters package text diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go index b293b6947361d..7cac1c19016f9 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -12,8 +12,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" ) // Builder construct a protoreflect.FileDescriptor from the raw descriptor. @@ -38,7 +37,7 @@ type Builder struct { // TypeResolver resolves extension field types for descriptor options. // If nil, it uses protoregistry.GlobalTypes. TypeResolver interface { - preg.ExtensionTypeResolver + protoregistry.ExtensionTypeResolver } // FileRegistry is use to lookup file, enum, and message dependencies. 
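The Builder resolver fields shown above and continued below fall back to the global registries when left nil. A minimal sketch of querying those same globals through the public protoregistry API (the imported well-known type is only there to ensure something is registered):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/durationpb" // registers duration.proto
)

func main() {
	// GlobalFiles is the registry the Builder falls back to when FileRegistry is nil.
	fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/duration.proto")
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.Package()) // google.protobuf
}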
@@ -46,8 +45,8 @@ type Builder struct { // If nil, it uses protoregistry.GlobalFiles. FileRegistry interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) - FindDescriptorByName(pref.FullName) (pref.Descriptor, error) - RegisterFile(pref.FileDescriptor) error + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) + RegisterFile(protoreflect.FileDescriptor) error } } @@ -55,8 +54,8 @@ type Builder struct { // If so, it permits looking up an enum or message dependency based on the // sub-list and element index into filetype.Builder.DependencyIndexes. type resolverByIndex interface { - FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor - FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor + FindEnumByIndex(int32, int32, []Enum, []Message) protoreflect.EnumDescriptor + FindMessageByIndex(int32, int32, []Enum, []Message) protoreflect.MessageDescriptor } // Indexes of each sub-list in filetype.Builder.DependencyIndexes. @@ -70,7 +69,7 @@ const ( // Out is the output of the Builder. type Out struct { - File pref.FileDescriptor + File protoreflect.FileDescriptor // Enums is all enum descriptors in "flattened ordering". Enums []Enum @@ -97,10 +96,10 @@ func (db Builder) Build() (out Out) { // Initialize resolvers and registries if unpopulated. if db.TypeResolver == nil { - db.TypeResolver = preg.GlobalTypes + db.TypeResolver = protoregistry.GlobalTypes } if db.FileRegistry == nil { - db.FileRegistry = preg.GlobalFiles + db.FileRegistry = protoregistry.GlobalFiles } fd := newRawFile(db) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 98ab142aeee67..7c3689baee8a5 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -17,7 +17,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -43,9 +43,9 @@ type ( L2 *FileL2 } FileL1 struct { - Syntax pref.Syntax + Syntax protoreflect.Syntax Path string - Package pref.FullName + Package protoreflect.FullName Enums Enums Messages Messages @@ -53,36 +53,36 @@ type ( Services Services } FileL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Imports FileImports Locations SourceLocations } ) -func (fd *File) ParentFile() pref.FileDescriptor { return fd } -func (fd *File) Parent() pref.Descriptor { return nil } -func (fd *File) Index() int { return 0 } -func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax } -func (fd *File) Name() pref.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() pref.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } -func (fd *File) Options() pref.ProtoMessage { +func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } +func (fd *File) Parent() protoreflect.Descriptor { return nil } +func (fd *File) Index() int { return 0 } +func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Options() 
protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() } return descopts.File } -func (fd *File) Path() string { return fd.L1.Path } -func (fd *File) Package() pref.FullName { return fd.L1.Package } -func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports } -func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums } -func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages } -func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions } -func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services } -func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations } -func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } -func (fd *File) ProtoType(pref.FileDescriptor) {} -func (fd *File) ProtoInternal(pragma.DoNotImplement) {} +func (fd *File) Path() string { return fd.L1.Path } +func (fd *File) Package() protoreflect.FullName { return fd.L1.Package } +func (fd *File) Imports() protoreflect.FileImports { return &fd.lazyInit().Imports } +func (fd *File) Enums() protoreflect.EnumDescriptors { return &fd.L1.Enums } +func (fd *File) Messages() protoreflect.MessageDescriptors { return &fd.L1.Messages } +func (fd *File) Extensions() protoreflect.ExtensionDescriptors { return &fd.L1.Extensions } +func (fd *File) Services() protoreflect.ServiceDescriptors { return &fd.L1.Services } +func (fd *File) SourceLocations() protoreflect.SourceLocations { return &fd.lazyInit().Locations } +func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *File) ProtoType(protoreflect.FileDescriptor) {} +func (fd *File) ProtoInternal(pragma.DoNotImplement) {} func (fd *File) lazyInit() *FileL2 { if atomic.LoadUint32(&fd.once) == 0 { @@ -119,7 +119,7 @@ type ( eagerValues bool // controls whether EnumL2.Values is already populated } EnumL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Values EnumValues ReservedNames Names ReservedRanges EnumRanges @@ -130,41 +130,41 @@ type ( L1 EnumValueL1 } EnumValueL1 struct { - Options func() pref.ProtoMessage - Number pref.EnumNumber + Options func() protoreflect.ProtoMessage + Number protoreflect.EnumNumber } ) -func (ed *Enum) Options() pref.ProtoMessage { +func (ed *Enum) Options() protoreflect.ProtoMessage { if f := ed.lazyInit().Options; f != nil { return f() } return descopts.Enum } -func (ed *Enum) Values() pref.EnumValueDescriptors { +func (ed *Enum) Values() protoreflect.EnumValueDescriptors { if ed.L1.eagerValues { return &ed.L2.Values } return &ed.lazyInit().Values } -func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames } -func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges } -func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } -func (ed *Enum) ProtoType(pref.EnumDescriptor) {} +func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit().ReservedNames } +func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges } +func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {} func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 } -func (ed *EnumValue) Options() pref.ProtoMessage { +func (ed *EnumValue) Options() protoreflect.ProtoMessage { if f := ed.L1.Options; f != nil { return 
f() } return descopts.EnumValue } -func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number } -func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } -func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {} +func (ed *EnumValue) Number() protoreflect.EnumNumber { return ed.L1.Number } +func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *EnumValue) ProtoType(protoreflect.EnumValueDescriptor) {} type ( Message struct { @@ -180,14 +180,14 @@ type ( IsMessageSet bool // promoted from google.protobuf.MessageOptions } MessageL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Fields Fields Oneofs Oneofs ReservedNames Names ReservedRanges FieldRanges RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality ExtensionRanges FieldRanges - ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges + ExtensionRangeOptions []func() protoreflect.ProtoMessage // must be same length as ExtensionRanges } Field struct { @@ -195,10 +195,10 @@ type ( L1 FieldL1 } FieldL1 struct { - Options func() pref.ProtoMessage - Number pref.FieldNumber - Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers - Kind pref.Kind + Options func() protoreflect.ProtoMessage + Number protoreflect.FieldNumber + Cardinality protoreflect.Cardinality // must be consistent with Message.RequiredNumbers + Kind protoreflect.Kind StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions @@ -207,9 +207,9 @@ type ( HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions EnforceUTF8 bool // promoted from google.protobuf.FieldOptions Default defaultValue - ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields - Enum pref.EnumDescriptor - Message pref.MessageDescriptor + ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields + Enum protoreflect.EnumDescriptor + Message protoreflect.MessageDescriptor } Oneof struct { @@ -217,35 +217,35 @@ type ( L1 OneofL1 } OneofL1 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Fields OneofFields // must be consistent with Message.Fields.ContainingOneof } ) -func (md *Message) Options() pref.ProtoMessage { +func (md *Message) Options() protoreflect.ProtoMessage { if f := md.lazyInit().Options; f != nil { return f() } return descopts.Message } -func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } -func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields } -func (md *Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs } -func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames } -func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges } -func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers } -func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges } -func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage { +func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } +func (md *Message) Fields() protoreflect.FieldDescriptors { return &md.lazyInit().Fields } +func (md *Message) Oneofs() protoreflect.OneofDescriptors { return &md.lazyInit().Oneofs } +func (md *Message) ReservedNames() 
protoreflect.Names { return &md.lazyInit().ReservedNames } +func (md *Message) ReservedRanges() protoreflect.FieldRanges { return &md.lazyInit().ReservedRanges } +func (md *Message) RequiredNumbers() protoreflect.FieldNumbers { return &md.lazyInit().RequiredNumbers } +func (md *Message) ExtensionRanges() protoreflect.FieldRanges { return &md.lazyInit().ExtensionRanges } +func (md *Message) ExtensionRangeOptions(i int) protoreflect.ProtoMessage { if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil { return f() } return descopts.ExtensionRange } -func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums } -func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages } -func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions } -func (md *Message) ProtoType(pref.MessageDescriptor) {} -func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Message) Enums() protoreflect.EnumDescriptors { return &md.L1.Enums } +func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L1.Messages } +func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions } +func (md *Message) ProtoType(protoreflect.MessageDescriptor) {} +func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } func (md *Message) lazyInit() *MessageL2 { md.L0.ParentFile.lazyInit() // implicitly initializes L2 return md.L2 @@ -260,28 +260,28 @@ func (md *Message) IsMessageSet() bool { return md.L1.IsMessageSet } -func (fd *Field) Options() pref.ProtoMessage { +func (fd *Field) Options() protoreflect.ProtoMessage { if f := fd.L1.Options; f != nil { return f() } return descopts.Field } -func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } -func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } +func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } +func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { - return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } func (fd *Field) HasOptionalKeyword() bool { - return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional + return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated { + if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && 
fd.L1.Cardinality == protoreflect.Repeated { switch fd.L1.Kind { - case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind: + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: default: return true } @@ -290,40 +290,40 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } -func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() } +func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } -func (fd *Field) MapKey() pref.FieldDescriptor { +func (fd *Field) MapKey() protoreflect.FieldDescriptor { if !fd.IsMap() { return nil } return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) } -func (fd *Field) MapValue() pref.FieldDescriptor { +func (fd *Field) MapValue() protoreflect.FieldDescriptor { if !fd.IsMap() { return nil } return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) } -func (fd *Field) HasDefault() bool { return fd.L1.Default.has } -func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } -func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum } -func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof } -func (fd *Field) ContainingMessage() pref.MessageDescriptor { - return fd.L0.Parent.(pref.MessageDescriptor) +func (fd *Field) HasDefault() bool { return fd.L1.Default.has } +func (fd *Field) Default() protoreflect.Value { return fd.L1.Default.get(fd) } +func (fd *Field) DefaultEnumValue() protoreflect.EnumValueDescriptor { return fd.L1.Default.enum } +func (fd *Field) ContainingOneof() protoreflect.OneofDescriptor { return fd.L1.ContainingOneof } +func (fd *Field) ContainingMessage() protoreflect.MessageDescriptor { + return fd.L0.Parent.(protoreflect.MessageDescriptor) } -func (fd *Field) Enum() pref.EnumDescriptor { +func (fd *Field) Enum() protoreflect.EnumDescriptor { return fd.L1.Enum } -func (fd *Field) Message() pref.MessageDescriptor { +func (fd *Field) Message() protoreflect.MessageDescriptor { if fd.L1.IsWeak { if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(pref.MessageDescriptor) + return d.(protoreflect.MessageDescriptor) } } return fd.L1.Message } -func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } -func (fd *Field) ProtoType(pref.FieldDescriptor) {} +func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8 // validation for the string field. 
This exists for Google-internal use only @@ -336,21 +336,21 @@ func (fd *Field) EnforceUTF8() bool { if fd.L1.HasEnforceUTF8 { return fd.L1.EnforceUTF8 } - return fd.L0.ParentFile.L1.Syntax == pref.Proto3 + return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 } func (od *Oneof) IsSynthetic() bool { - return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() + return od.L0.ParentFile.L1.Syntax == protoreflect.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() } -func (od *Oneof) Options() pref.ProtoMessage { +func (od *Oneof) Options() protoreflect.ProtoMessage { if f := od.L1.Options; f != nil { return f() } return descopts.Oneof } -func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields } -func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } -func (od *Oneof) ProtoType(pref.OneofDescriptor) {} +func (od *Oneof) Fields() protoreflect.FieldDescriptors { return &od.L1.Fields } +func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } +func (od *Oneof) ProtoType(protoreflect.OneofDescriptor) {} type ( Extension struct { @@ -359,55 +359,57 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number pref.FieldNumber - Extendee pref.MessageDescriptor - Cardinality pref.Cardinality - Kind pref.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind } ExtensionL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue - Enum pref.EnumDescriptor - Message pref.MessageDescriptor + Enum protoreflect.EnumDescriptor + Message protoreflect.MessageDescriptor } ) -func (xd *Extension) Options() pref.ProtoMessage { +func (xd *Extension) Options() protoreflect.ProtoMessage { if f := xd.lazyInit().Options; f != nil { return f() } return descopts.Field } -func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } -func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } -func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } -func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } -func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } -func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } -func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } +func (xd *Extension) Number() protoreflect.FieldNumber { return xd.L1.Number } +func (xd *Extension) Cardinality() protoreflect.Cardinality { return xd.L1.Cardinality } +func (xd *Extension) Kind() protoreflect.Kind { return xd.L1.Kind } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } +func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != protoreflect.Repeated } func (xd *Extension) HasOptionalKeyword() bool { - return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional -} -func (xd *Extension) IsPacked() bool { return 
xd.lazyInit().IsPacked } -func (xd *Extension) IsExtension() bool { return true } -func (xd *Extension) IsWeak() bool { return false } -func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated } -func (xd *Extension) IsMap() bool { return false } -func (xd *Extension) MapKey() pref.FieldDescriptor { return nil } -func (xd *Extension) MapValue() pref.FieldDescriptor { return nil } -func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } -func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) } -func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum } -func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil } -func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee } -func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum } -func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message } -func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } -func (xd *Extension) ProtoType(pref.FieldDescriptor) {} -func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} + return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional +} +func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsExtension() bool { return true } +func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } +func (xd *Extension) IsMap() bool { return false } +func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } +func (xd *Extension) MapValue() protoreflect.FieldDescriptor { return nil } +func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } +func (xd *Extension) Default() protoreflect.Value { return xd.lazyInit().Default.get(xd) } +func (xd *Extension) DefaultEnumValue() protoreflect.EnumValueDescriptor { + return xd.lazyInit().Default.enum +} +func (xd *Extension) ContainingOneof() protoreflect.OneofDescriptor { return nil } +func (xd *Extension) ContainingMessage() protoreflect.MessageDescriptor { return xd.L1.Extendee } +func (xd *Extension) Enum() protoreflect.EnumDescriptor { return xd.lazyInit().Enum } +func (xd *Extension) Message() protoreflect.MessageDescriptor { return xd.lazyInit().Message } +func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } +func (xd *Extension) ProtoType(protoreflect.FieldDescriptor) {} +func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} func (xd *Extension) lazyInit() *ExtensionL2 { xd.L0.ParentFile.lazyInit() // implicitly initializes L2 return xd.L2 @@ -421,7 +423,7 @@ type ( } ServiceL1 struct{} ServiceL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Methods Methods } @@ -430,48 +432,48 @@ type ( L1 MethodL1 } MethodL1 struct { - Options func() pref.ProtoMessage - Input pref.MessageDescriptor - Output pref.MessageDescriptor + Options func() protoreflect.ProtoMessage + Input protoreflect.MessageDescriptor + Output protoreflect.MessageDescriptor IsStreamingClient bool IsStreamingServer bool } ) -func (sd *Service) Options() pref.ProtoMessage { +func (sd *Service) Options() protoreflect.ProtoMessage { if f := sd.lazyInit().Options; f != nil { return f() } return descopts.Service } -func (sd *Service) Methods() pref.MethodDescriptors { return 
&sd.lazyInit().Methods } -func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } -func (sd *Service) ProtoType(pref.ServiceDescriptor) {} -func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} +func (sd *Service) Methods() protoreflect.MethodDescriptors { return &sd.lazyInit().Methods } +func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } +func (sd *Service) ProtoType(protoreflect.ServiceDescriptor) {} +func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} func (sd *Service) lazyInit() *ServiceL2 { sd.L0.ParentFile.lazyInit() // implicitly initializes L2 return sd.L2 } -func (md *Method) Options() pref.ProtoMessage { +func (md *Method) Options() protoreflect.ProtoMessage { if f := md.L1.Options; f != nil { return f() } return descopts.Method } -func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input } -func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output } -func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } -func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } -func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } -func (md *Method) ProtoType(pref.MethodDescriptor) {} -func (md *Method) ProtoInternal(pragma.DoNotImplement) {} +func (md *Method) Input() protoreflect.MessageDescriptor { return md.L1.Input } +func (md *Method) Output() protoreflect.MessageDescriptor { return md.L1.Output } +func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } +func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } +func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Method) ProtoType(protoreflect.MethodDescriptor) {} +func (md *Method) ProtoInternal(pragma.DoNotImplement) {} // Surrogate files are can be used to create standalone descriptors // where the syntax is only information derived from the parent file. 
var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}} + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} ) type ( @@ -479,24 +481,24 @@ type ( L0 BaseL0 } BaseL0 struct { - FullName pref.FullName // must be populated - ParentFile *File // must be populated - Parent pref.Descriptor + FullName protoreflect.FullName // must be populated + ParentFile *File // must be populated + Parent protoreflect.Descriptor Index int } ) -func (d *Base) Name() pref.Name { return d.L0.FullName.Name() } -func (d *Base) FullName() pref.FullName { return d.L0.FullName } -func (d *Base) ParentFile() pref.FileDescriptor { +func (d *Base) Name() protoreflect.Name { return d.L0.FullName.Name() } +func (d *Base) FullName() protoreflect.FullName { return d.L0.FullName } +func (d *Base) ParentFile() protoreflect.FileDescriptor { if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 { return nil // surrogate files are not real parents } return d.L0.ParentFile } -func (d *Base) Parent() pref.Descriptor { return d.L0.Parent } +func (d *Base) Parent() protoreflect.Descriptor { return d.L0.Parent } func (d *Base) Index() int { return d.L0.Index } -func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() } +func (d *Base) Syntax() protoreflect.Syntax { return d.L0.ParentFile.Syntax() } func (d *Base) IsPlaceholder() bool { return false } func (d *Base) ProtoInternal(pragma.DoNotImplement) {} @@ -513,7 +515,7 @@ func (s *stringName) InitJSON(name string) { s.nameJSON = name } -func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { +func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { s.once.Do(func() { if fd.IsExtension() { // For extensions, JSON and text are formatted the same way. @@ -533,7 +535,7 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { // Format the text name. 
s.nameText = string(fd.Name()) - if fd.Kind() == pref.GroupKind { + if fd.Kind() == protoreflect.GroupKind { s.nameText = string(fd.Message().Name()) } } @@ -541,10 +543,10 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { return s } -func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } -func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } +func (s *stringName) getJSON(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +func (s *stringName) getText(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameText } -func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { +func DefaultValue(v protoreflect.Value, ev protoreflect.EnumValueDescriptor) defaultValue { dv := defaultValue{has: v.IsValid(), val: v, enum: ev} if b, ok := v.Interface().([]byte); ok { // Store a copy of the default bytes, so that we can detect @@ -554,9 +556,9 @@ func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { return dv } -func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue { - var evs pref.EnumValueDescriptors - if k == pref.EnumKind { +func unmarshalDefault(b []byte, k protoreflect.Kind, pf *File, ed protoreflect.EnumDescriptor) defaultValue { + var evs protoreflect.EnumValueDescriptors + if k == protoreflect.EnumKind { // If the enum is declared within the same file, be careful not to // blindly call the Values method, lest we bind ourselves in a deadlock. if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf { @@ -567,9 +569,9 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d // If we are unable to resolve the enum dependency, use a placeholder // enum value since we will not be able to parse the default value. - if ed.IsPlaceholder() && pref.Name(b).IsValid() { - v := pref.ValueOfEnum(0) - ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b))) + if ed.IsPlaceholder() && protoreflect.Name(b).IsValid() { + v := protoreflect.ValueOfEnum(0) + ev := PlaceholderEnumValue(ed.FullName().Parent().Append(protoreflect.Name(b))) return DefaultValue(v, ev) } } @@ -583,41 +585,41 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d type defaultValue struct { has bool - val pref.Value - enum pref.EnumValueDescriptor + val protoreflect.Value + enum protoreflect.EnumValueDescriptor bytes []byte } -func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value { +func (dv *defaultValue) get(fd protoreflect.FieldDescriptor) protoreflect.Value { // Return the zero value as the default if unpopulated. 
if !dv.has { - if fd.Cardinality() == pref.Repeated { - return pref.Value{} + if fd.Cardinality() == protoreflect.Repeated { + return protoreflect.Value{} } switch fd.Kind() { - case pref.BoolKind: - return pref.ValueOfBool(false) - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: - return pref.ValueOfInt32(0) - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - return pref.ValueOfInt64(0) - case pref.Uint32Kind, pref.Fixed32Kind: - return pref.ValueOfUint32(0) - case pref.Uint64Kind, pref.Fixed64Kind: - return pref.ValueOfUint64(0) - case pref.FloatKind: - return pref.ValueOfFloat32(0) - case pref.DoubleKind: - return pref.ValueOfFloat64(0) - case pref.StringKind: - return pref.ValueOfString("") - case pref.BytesKind: - return pref.ValueOfBytes(nil) - case pref.EnumKind: + case protoreflect.BoolKind: + return protoreflect.ValueOfBool(false) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return protoreflect.ValueOfInt32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return protoreflect.ValueOfInt64(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return protoreflect.ValueOfUint32(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return protoreflect.ValueOfUint64(0) + case protoreflect.FloatKind: + return protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + return protoreflect.ValueOfFloat64(0) + case protoreflect.StringKind: + return protoreflect.ValueOfString("") + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes(nil) + case protoreflect.EnumKind: if evs := fd.Enum().Values(); evs.Len() > 0 { - return pref.ValueOfEnum(evs.Get(0).Number()) + return protoreflect.ValueOfEnum(evs.Get(0).Number()) } - return pref.ValueOfEnum(0) + return protoreflect.ValueOfEnum(0) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 66e1fee522432..4a1584c9d29fd 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // fileRaw is a data struct used when initializing a file descriptor from @@ -95,7 +95,7 @@ func (fd *File) unmarshalSeed(b []byte) { sb := getBuilder() defer putBuilder(sb) - var prevField pref.FieldNumber + var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int b0 := b @@ -110,16 +110,16 @@ func (fd *File) unmarshalSeed(b []byte) { case genid.FileDescriptorProto_Syntax_field_number: switch string(v) { case "proto2": - fd.L1.Syntax = pref.Proto2 + fd.L1.Syntax = protoreflect.Proto2 case "proto3": - fd.L1.Syntax = pref.Proto3 + fd.L1.Syntax = protoreflect.Proto3 default: panic("invalid syntax") } case genid.FileDescriptorProto_Name_field_number: fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: - fd.L1.Package = pref.FullName(sb.MakeString(v)) + fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) case genid.FileDescriptorProto_EnumType_field_number: if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -163,7 +163,7 @@ func 
(fd *File) unmarshalSeed(b []byte) { // If syntax is missing, it is assumed to be proto2. if fd.L1.Syntax == 0 { - fd.L1.Syntax = pref.Proto2 + fd.L1.Syntax = protoreflect.Proto2 } // Must allocate all declarations before parsing each descriptor type @@ -219,7 +219,7 @@ func (fd *File) unmarshalSeed(b []byte) { } } -func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd ed.L0.Index = i @@ -271,12 +271,12 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc } } -func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i - var prevField pref.FieldNumber + var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int var posEnums, posMessages, posExtensions int b0 := b @@ -387,7 +387,7 @@ func (md *Message) unmarshalSeedOptions(b []byte) { } } -func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { xd.L0.ParentFile = pf xd.L0.Parent = pd xd.L0.Index = i @@ -401,11 +401,11 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref b = b[m:] switch num { case genid.FieldDescriptorProto_Number_field_number: - xd.L1.Number = pref.FieldNumber(v) + xd.L1.Number = protoreflect.FieldNumber(v) case genid.FieldDescriptorProto_Label_field_number: - xd.L1.Cardinality = pref.Cardinality(v) + xd.L1.Cardinality = protoreflect.Cardinality(v) case genid.FieldDescriptorProto_Type_field_number: - xd.L1.Kind = pref.Kind(v) + xd.L1.Kind = protoreflect.Kind(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -423,7 +423,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref } } -func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { sd.L0.ParentFile = pf sd.L0.Parent = pd sd.L0.Index = i @@ -459,13 +459,13 @@ func putBuilder(b *strs.Builder) { // makeFullName converts b to a protoreflect.FullName, // where b must start with a leading dot. -func makeFullName(sb *strs.Builder, b []byte) pref.FullName { +func makeFullName(sb *strs.Builder, b []byte) protoreflect.FullName { if len(b) == 0 || b[0] != '.' 
{ panic("name reference must be fully qualified") } - return pref.FullName(sb.MakeString(b[1:])) + return protoreflect.FullName(sb.MakeString(b[1:])) } -func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName { - return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix))) +func appendFullName(sb *strs.Builder, prefix protoreflect.FullName, suffix []byte) protoreflect.FullName { + return sb.AppendFullName(prefix, protoreflect.Name(strs.UnsafeString(suffix))) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 198451e3ec941..736a19a75bc73 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -13,7 +13,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) func (fd *File) lazyRawInit() { @@ -39,10 +39,10 @@ func (file *File) resolveMessages() { // Resolve message field dependency. switch fd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ } @@ -62,10 +62,10 @@ func (file *File) resolveExtensions() { // Resolve extension field dependency. switch xd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx) depIdx++ } @@ -92,7 +92,7 @@ func (file *File) resolveServices() { } } -func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor { +func (file *File) resolveEnumDependency(ed protoreflect.EnumDescriptor, i, j int32) protoreflect.EnumDescriptor { r := file.builder.FileRegistry if r, ok := r.(resolverByIndex); ok { if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil { @@ -105,12 +105,12 @@ func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref } } if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil { - return d.(pref.EnumDescriptor) + return d.(protoreflect.EnumDescriptor) } return ed } -func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor { +func (file *File) resolveMessageDependency(md protoreflect.MessageDescriptor, i, j int32) protoreflect.MessageDescriptor { r := file.builder.FileRegistry if r, ok := r.(resolverByIndex); ok { if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil { @@ -123,7 +123,7 @@ func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32 } } if d, _ := r.FindDescriptorByName(md.FullName()); d != nil { - return d.(pref.MessageDescriptor) + return d.(protoreflect.MessageDescriptor) } return md } @@ -158,7 +158,7 @@ func (fd *File) unmarshalFull(b []byte) { if imp == nil { imp = PlaceholderFile(path) } - fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) + fd.L2.Imports = 
append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp}) case genid.FileDescriptorProto_EnumType_field_number: fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ @@ -199,7 +199,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { case genid.EnumDescriptorProto_Value_field_number: rawValues = append(rawValues, v) case genid.EnumDescriptorProto_ReservedName_field_number: - ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) case genid.EnumDescriptorProto_ReservedRange_field_number: ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) case genid.EnumDescriptorProto_Options_field_number: @@ -219,7 +219,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions) } -func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { +func unmarshalEnumReservedRange(b []byte) (r [2]protoreflect.EnumNumber) { for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) b = b[n:] @@ -229,9 +229,9 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { b = b[m:] switch num { case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: - r[0] = pref.EnumNumber(v) + r[0] = protoreflect.EnumNumber(v) case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: - r[1] = pref.EnumNumber(v) + r[1] = protoreflect.EnumNumber(v) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -241,7 +241,7 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { return r } -func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { vd.L0.ParentFile = pf vd.L0.Parent = pd vd.L0.Index = i @@ -256,7 +256,7 @@ func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref b = b[m:] switch num { case genid.EnumValueDescriptorProto_Number_field_number: - vd.L1.Number = pref.EnumNumber(v) + vd.L1.Number = protoreflect.EnumNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -294,7 +294,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { case genid.DescriptorProto_OneofDecl_field_number: rawOneofs = append(rawOneofs, v) case genid.DescriptorProto_ReservedName_field_number: - md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) case genid.DescriptorProto_ReservedRange_field_number: md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) case genid.DescriptorProto_ExtensionRange_field_number: @@ -326,7 +326,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { for i, b := range rawFields { fd := &md.L2.Fields.List[i] fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i) - if fd.L1.Cardinality == pref.Required { + if fd.L1.Cardinality == protoreflect.Required { md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number) } } @@ -359,7 +359,7 @@ func (md *Message) unmarshalOptions(b []byte) { } } -func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { +func unmarshalMessageReservedRange(b []byte) (r [2]protoreflect.FieldNumber) { for len(b) > 0 { num, typ, n := 
protowire.ConsumeTag(b) b = b[n:] @@ -369,9 +369,9 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { b = b[m:] switch num { case genid.DescriptorProto_ReservedRange_Start_field_number: - r[0] = pref.FieldNumber(v) + r[0] = protoreflect.FieldNumber(v) case genid.DescriptorProto_ReservedRange_End_field_number: - r[1] = pref.FieldNumber(v) + r[1] = protoreflect.FieldNumber(v) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -381,7 +381,7 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { return r } -func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) { +func unmarshalMessageExtensionRange(b []byte) (r [2]protoreflect.FieldNumber, rawOptions []byte) { for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) b = b[n:] @@ -391,9 +391,9 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions b = b[m:] switch num { case genid.DescriptorProto_ExtensionRange_Start_field_number: - r[0] = pref.FieldNumber(v) + r[0] = protoreflect.FieldNumber(v) case genid.DescriptorProto_ExtensionRange_End_field_number: - r[1] = pref.FieldNumber(v) + r[1] = protoreflect.FieldNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -410,7 +410,7 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions return r, rawOptions } -func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i @@ -426,11 +426,11 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des b = b[m:] switch num { case genid.FieldDescriptorProto_Number_field_number: - fd.L1.Number = pref.FieldNumber(v) + fd.L1.Number = protoreflect.FieldNumber(v) case genid.FieldDescriptorProto_Label_field_number: - fd.L1.Cardinality = pref.Cardinality(v) + fd.L1.Cardinality = protoreflect.Cardinality(v) case genid.FieldDescriptorProto_Type_field_number: - fd.L1.Kind = pref.Kind(v) + fd.L1.Kind = protoreflect.Kind(v) case genid.FieldDescriptorProto_OneofIndex_field_number: // In Message.unmarshalFull, we allocate slices for both // the field and oneof descriptors before unmarshaling either @@ -453,7 +453,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des case genid.FieldDescriptorProto_JsonName_field_number: fd.L1.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: - fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages + fd.L1.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: @@ -468,9 +468,9 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: fd.L1.Enum = PlaceholderEnum(name) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = PlaceholderMessage(name) } } @@ -504,7 +504,7 @@ func (fd *Field) unmarshalOptions(b []byte) { } } -func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (od *Oneof) 
unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { od.L0.ParentFile = pf od.L0.Parent = pd od.L0.Index = i @@ -553,7 +553,7 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { case genid.FieldDescriptorProto_JsonName_field_number: xd.L2.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: - xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions + xd.L2.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: @@ -568,9 +568,9 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: xd.L2.Enum = PlaceholderEnum(name) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: xd.L2.Message = PlaceholderMessage(name) } } @@ -627,7 +627,7 @@ func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions) } -func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i @@ -680,18 +680,18 @@ func appendOptions(dst, src []byte) []byte { // // The type of message to unmarshal to is passed as a pointer since the // vars in descopts may not yet be populated at the time this function is called. 
-func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage { +func (db *Builder) optionsUnmarshaler(p *protoreflect.ProtoMessage, b []byte) func() protoreflect.ProtoMessage { if b == nil { return nil } - var opts pref.ProtoMessage + var opts protoreflect.ProtoMessage var once sync.Once - return func() pref.ProtoMessage { + return func() protoreflect.ProtoMessage { once.Do(func() { if *p == nil { panic("Descriptor.Options called without importing the descriptor package") } - opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage) + opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(protoreflect.ProtoMessage) if err := (proto.UnmarshalOptions{ AllowPartial: true, Resolver: db.TypeResolver, diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go index aa294fff99a8f..e3b6587da63ab 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -17,31 +17,30 @@ import ( "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" ) -type FileImports []pref.FileImport +type FileImports []protoreflect.FileImport func (p *FileImports) Len() int { return len(*p) } -func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] } +func (p *FileImports) Get(i int) protoreflect.FileImport { return (*p)[i] } func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {} type Names struct { - List []pref.Name + List []protoreflect.Name once sync.Once - has map[pref.Name]int // protected by once + has map[protoreflect.Name]int // protected by once } func (p *Names) Len() int { return len(p.List) } -func (p *Names) Get(i int) pref.Name { return p.List[i] } -func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 } +func (p *Names) Get(i int) protoreflect.Name { return p.List[i] } +func (p *Names) Has(s protoreflect.Name) bool { return p.lazyInit().has[s] > 0 } func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *Names) ProtoInternal(pragma.DoNotImplement) {} func (p *Names) lazyInit() *Names { p.once.Do(func() { if len(p.List) > 0 { - p.has = make(map[pref.Name]int, len(p.List)) + p.has = make(map[protoreflect.Name]int, len(p.List)) for _, s := range p.List { p.has[s] = p.has[s] + 1 } @@ -67,14 +66,14 @@ func (p *Names) CheckValid() error { } type EnumRanges struct { - List [][2]pref.EnumNumber // start inclusive; end inclusive + List [][2]protoreflect.EnumNumber // start inclusive; end inclusive once sync.Once - sorted [][2]pref.EnumNumber // protected by once + sorted [][2]protoreflect.EnumNumber // protected by once } -func (p *EnumRanges) Len() int { return len(p.List) } -func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] } -func (p *EnumRanges) Has(n pref.EnumNumber) bool { +func (p *EnumRanges) Len() int { return len(p.List) } +func (p *EnumRanges) Get(i int) [2]protoreflect.EnumNumber { return p.List[i] } +func (p *EnumRanges) Has(n protoreflect.EnumNumber) bool { for ls := p.lazyInit().sorted; len(ls) > 0; { i := len(ls) / 2 switch r := enumRange(ls[i]); { @@ -129,14 +128,14 @@ func (r enumRange) String() string { } type FieldRanges struct { - List [][2]pref.FieldNumber // 
start inclusive; end exclusive + List [][2]protoreflect.FieldNumber // start inclusive; end exclusive once sync.Once - sorted [][2]pref.FieldNumber // protected by once + sorted [][2]protoreflect.FieldNumber // protected by once } -func (p *FieldRanges) Len() int { return len(p.List) } -func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] } -func (p *FieldRanges) Has(n pref.FieldNumber) bool { +func (p *FieldRanges) Len() int { return len(p.List) } +func (p *FieldRanges) Get(i int) [2]protoreflect.FieldNumber { return p.List[i] } +func (p *FieldRanges) Has(n protoreflect.FieldNumber) bool { for ls := p.lazyInit().sorted; len(ls) > 0; { i := len(ls) / 2 switch r := fieldRange(ls[i]); { @@ -221,17 +220,17 @@ func (r fieldRange) String() string { } type FieldNumbers struct { - List []pref.FieldNumber + List []protoreflect.FieldNumber once sync.Once - has map[pref.FieldNumber]struct{} // protected by once + has map[protoreflect.FieldNumber]struct{} // protected by once } -func (p *FieldNumbers) Len() int { return len(p.List) } -func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] } -func (p *FieldNumbers) Has(n pref.FieldNumber) bool { +func (p *FieldNumbers) Len() int { return len(p.List) } +func (p *FieldNumbers) Get(i int) protoreflect.FieldNumber { return p.List[i] } +func (p *FieldNumbers) Has(n protoreflect.FieldNumber) bool { p.once.Do(func() { if len(p.List) > 0 { - p.has = make(map[pref.FieldNumber]struct{}, len(p.List)) + p.has = make(map[protoreflect.FieldNumber]struct{}, len(p.List)) for _, n := range p.List { p.has[n] = struct{}{} } @@ -244,30 +243,38 @@ func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {} type OneofFields struct { - List []pref.FieldDescriptor + List []protoreflect.FieldDescriptor once sync.Once - byName map[pref.Name]pref.FieldDescriptor // protected by once - byJSON map[string]pref.FieldDescriptor // protected by once - byText map[string]pref.FieldDescriptor // protected by once - byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once + byName map[protoreflect.Name]protoreflect.FieldDescriptor // protected by once + byJSON map[string]protoreflect.FieldDescriptor // protected by once + byText map[string]protoreflect.FieldDescriptor // protected by once + byNum map[protoreflect.FieldNumber]protoreflect.FieldDescriptor // protected by once } -func (p *OneofFields) Len() int { return len(p.List) } -func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } -func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } -func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } -func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } -func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } -func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } -func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} +func (p *OneofFields) Len() int { return len(p.List) } +func (p *OneofFields) Get(i int) protoreflect.FieldDescriptor { return p.List[i] } +func (p *OneofFields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + return p.lazyInit().byName[s] +} +func (p *OneofFields) ByJSONName(s string) protoreflect.FieldDescriptor { + return p.lazyInit().byJSON[s] +} +func (p *OneofFields) ByTextName(s string) 
protoreflect.FieldDescriptor { + return p.lazyInit().byText[s] +} +func (p *OneofFields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + return p.lazyInit().byNum[n] +} +func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} func (p *OneofFields) lazyInit() *OneofFields { p.once.Do(func() { if len(p.List) > 0 { - p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) - p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) - p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) - p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) + p.byName = make(map[protoreflect.Name]protoreflect.FieldDescriptor, len(p.List)) + p.byJSON = make(map[string]protoreflect.FieldDescriptor, len(p.List)) + p.byText = make(map[string]protoreflect.FieldDescriptor, len(p.List)) + p.byNum = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor, len(p.List)) for _, f := range p.List { // Field names and numbers are guaranteed to be unique. p.byName[f.Name()] = f @@ -284,123 +291,123 @@ type SourceLocations struct { // List is a list of SourceLocations. // The SourceLocation.Next field does not need to be populated // as it will be lazily populated upon first need. - List []pref.SourceLocation + List []protoreflect.SourceLocation // File is the parent file descriptor that these locations are relative to. // If non-nil, ByDescriptor verifies that the provided descriptor // is a child of this file descriptor. - File pref.FileDescriptor + File protoreflect.FileDescriptor once sync.Once byPath map[pathKey]int } -func (p *SourceLocations) Len() int { return len(p.List) } -func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } -func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) protoreflect.SourceLocation { return p.lazyInit().List[i] } +func (p *SourceLocations) byKey(k pathKey) protoreflect.SourceLocation { if i, ok := p.lazyInit().byPath[k]; ok { return p.List[i] } - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } -func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { +func (p *SourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation { return p.byKey(newPathKey(path)) } -func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { +func (p *SourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation { if p.File != nil && desc != nil && p.File != desc.ParentFile() { - return pref.SourceLocation{} // mismatching parent files + return protoreflect.SourceLocation{} // mismatching parent files } var pathArr [16]int32 path := pathArr[:0] for { switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: // Reverse the path since it was constructed in reverse. 
for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { path[i], path[j] = path[j], path[i] } return p.byKey(newPathKey(path)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.FieldDescriptor: - isExtension := desc.(pref.FieldDescriptor).IsExtension() + case protoreflect.FieldDescriptor: + isExtension := desc.(protoreflect.FieldDescriptor).IsExtension() path = append(path, int32(desc.Index())) desc = desc.Parent() if isExtension { switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_Extension_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } else { switch desc.(type) { - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_Field_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } - case pref.OneofDescriptor: + case protoreflect.OneofDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.EnumDescriptor: + case protoreflect.EnumDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.EnumValueDescriptor: + case protoreflect.EnumValueDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.EnumDescriptor: + case protoreflect.EnumDescriptor: path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.ServiceDescriptor: + case protoreflect.ServiceDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.MethodDescriptor: + case protoreflect.MethodDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.ServiceDescriptor: + case protoreflect.ServiceDescriptor: path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) default: - return pref.SourceLocation{} + return 
protoreflect.SourceLocation{} } default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } } @@ -435,7 +442,7 @@ type pathKey struct { str string // used if the path does not fit in arr } -func newPathKey(p pref.SourcePath) (k pathKey) { +func newPathKey(p protoreflect.SourcePath) (k pathKey) { if len(p) < len(k.arr) { for i, ps := range p { if ps < 0 || math.MaxUint8 <= ps { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go index dbf2c605bfe54..28240ebc5c4ab 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -7,7 +7,7 @@ package filedesc import ( "google.golang.org/protobuf/internal/descopts" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) var ( @@ -30,78 +30,80 @@ var ( // PlaceholderFile is a placeholder, representing only the file path. type PlaceholderFile string -func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f } -func (f PlaceholderFile) Parent() pref.Descriptor { return nil } -func (f PlaceholderFile) Index() int { return 0 } -func (f PlaceholderFile) Syntax() pref.Syntax { return 0 } -func (f PlaceholderFile) Name() pref.Name { return "" } -func (f PlaceholderFile) FullName() pref.FullName { return "" } -func (f PlaceholderFile) IsPlaceholder() bool { return true } -func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File } -func (f PlaceholderFile) Path() string { return string(f) } -func (f PlaceholderFile) Package() pref.FullName { return "" } -func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles } -func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages } -func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums } -func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions } -func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices } -func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations } -func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return } -func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } +func (f PlaceholderFile) ParentFile() protoreflect.FileDescriptor { return f } +func (f PlaceholderFile) Parent() protoreflect.Descriptor { return nil } +func (f PlaceholderFile) Index() int { return 0 } +func (f PlaceholderFile) Syntax() protoreflect.Syntax { return 0 } +func (f PlaceholderFile) Name() protoreflect.Name { return "" } +func (f PlaceholderFile) FullName() protoreflect.FullName { return "" } +func (f PlaceholderFile) IsPlaceholder() bool { return true } +func (f PlaceholderFile) Options() protoreflect.ProtoMessage { return descopts.File } +func (f PlaceholderFile) Path() string { return string(f) } +func (f PlaceholderFile) Package() protoreflect.FullName { return "" } +func (f PlaceholderFile) Imports() protoreflect.FileImports { return emptyFiles } +func (f PlaceholderFile) Messages() protoreflect.MessageDescriptors { return emptyMessages } +func (f PlaceholderFile) Enums() protoreflect.EnumDescriptors { return emptyEnums } +func (f PlaceholderFile) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } +func (f PlaceholderFile) Services() protoreflect.ServiceDescriptors { return 
emptyServices } +func (f PlaceholderFile) SourceLocations() protoreflect.SourceLocations { return emptySourceLocations } +func (f PlaceholderFile) ProtoType(protoreflect.FileDescriptor) { return } +func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderEnum is a placeholder, representing only the full name. -type PlaceholderEnum pref.FullName +type PlaceholderEnum protoreflect.FullName -func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil } -func (e PlaceholderEnum) Parent() pref.Descriptor { return nil } -func (e PlaceholderEnum) Index() int { return 0 } -func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 } -func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() } -func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) } -func (e PlaceholderEnum) IsPlaceholder() bool { return true } -func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum } -func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues } -func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames } -func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges } -func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return } -func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } +func (e PlaceholderEnum) ParentFile() protoreflect.FileDescriptor { return nil } +func (e PlaceholderEnum) Parent() protoreflect.Descriptor { return nil } +func (e PlaceholderEnum) Index() int { return 0 } +func (e PlaceholderEnum) Syntax() protoreflect.Syntax { return 0 } +func (e PlaceholderEnum) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } +func (e PlaceholderEnum) FullName() protoreflect.FullName { return protoreflect.FullName(e) } +func (e PlaceholderEnum) IsPlaceholder() bool { return true } +func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return descopts.Enum } +func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } +func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } +func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } +func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderEnumValue is a placeholder, representing only the full name. 
-type PlaceholderEnumValue pref.FullName +type PlaceholderEnumValue protoreflect.FullName -func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil } -func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil } -func (e PlaceholderEnumValue) Index() int { return 0 } -func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 } -func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() } -func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) } -func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } -func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue } -func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 } -func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return } -func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } +func (e PlaceholderEnumValue) ParentFile() protoreflect.FileDescriptor { return nil } +func (e PlaceholderEnumValue) Parent() protoreflect.Descriptor { return nil } +func (e PlaceholderEnumValue) Index() int { return 0 } +func (e PlaceholderEnumValue) Syntax() protoreflect.Syntax { return 0 } +func (e PlaceholderEnumValue) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } +func (e PlaceholderEnumValue) FullName() protoreflect.FullName { return protoreflect.FullName(e) } +func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } +func (e PlaceholderEnumValue) Options() protoreflect.ProtoMessage { return descopts.EnumValue } +func (e PlaceholderEnumValue) Number() protoreflect.EnumNumber { return 0 } +func (e PlaceholderEnumValue) ProtoType(protoreflect.EnumValueDescriptor) { return } +func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderMessage is a placeholder, representing only the full name. 
-type PlaceholderMessage pref.FullName +type PlaceholderMessage protoreflect.FullName -func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil } -func (m PlaceholderMessage) Parent() pref.Descriptor { return nil } -func (m PlaceholderMessage) Index() int { return 0 } -func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 } -func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() } -func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) } -func (m PlaceholderMessage) IsPlaceholder() bool { return true } -func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message } -func (m PlaceholderMessage) IsMapEntry() bool { return false } -func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields } -func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs } -func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames } -func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges } -func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers } -func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges } -func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") } -func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages } -func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums } -func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions } -func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return } -func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } +func (m PlaceholderMessage) ParentFile() protoreflect.FileDescriptor { return nil } +func (m PlaceholderMessage) Parent() protoreflect.Descriptor { return nil } +func (m PlaceholderMessage) Index() int { return 0 } +func (m PlaceholderMessage) Syntax() protoreflect.Syntax { return 0 } +func (m PlaceholderMessage) Name() protoreflect.Name { return protoreflect.FullName(m).Name() } +func (m PlaceholderMessage) FullName() protoreflect.FullName { return protoreflect.FullName(m) } +func (m PlaceholderMessage) IsPlaceholder() bool { return true } +func (m PlaceholderMessage) Options() protoreflect.ProtoMessage { return descopts.Message } +func (m PlaceholderMessage) IsMapEntry() bool { return false } +func (m PlaceholderMessage) Fields() protoreflect.FieldDescriptors { return emptyFields } +func (m PlaceholderMessage) Oneofs() protoreflect.OneofDescriptors { return emptyOneofs } +func (m PlaceholderMessage) ReservedNames() protoreflect.Names { return emptyNames } +func (m PlaceholderMessage) ReservedRanges() protoreflect.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) RequiredNumbers() protoreflect.FieldNumbers { return emptyFieldNumbers } +func (m PlaceholderMessage) ExtensionRanges() protoreflect.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) ExtensionRangeOptions(int) protoreflect.ProtoMessage { + panic("index out of range") +} +func (m PlaceholderMessage) Messages() protoreflect.MessageDescriptors { return emptyMessages } +func (m PlaceholderMessage) Enums() protoreflect.EnumDescriptors { return emptyEnums } +func (m PlaceholderMessage) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } +func (m PlaceholderMessage) ProtoType(protoreflect.MessageDescriptor) { return } +func (m 
PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index 0a0dd35de5a7c..f0e38c4ef4e08 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -10,17 +10,16 @@ import ( "reflect" "google.golang.org/protobuf/internal/descopts" - fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/filedesc" pimpl "google.golang.org/protobuf/internal/impl" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) // Builder constructs type descriptors from a raw file descriptor // and associated Go types for each enum and message declaration. // -// -// Flattened Ordering +// # Flattened Ordering // // The protobuf type system represents declarations as a tree. Certain nodes in // the tree require us to either associate it with a concrete Go type or to @@ -52,7 +51,7 @@ import ( // that children themselves may have. type Builder struct { // File is the underlying file descriptor builder. - File fdesc.Builder + File filedesc.Builder // GoTypes is a unique set of the Go types for all declarations and // dependencies. Each type is represented as a zero value of the Go type. @@ -108,22 +107,22 @@ type Builder struct { // TypeRegistry is the registry to register each type descriptor. // If nil, it uses protoregistry.GlobalTypes. TypeRegistry interface { - RegisterMessage(pref.MessageType) error - RegisterEnum(pref.EnumType) error - RegisterExtension(pref.ExtensionType) error + RegisterMessage(protoreflect.MessageType) error + RegisterEnum(protoreflect.EnumType) error + RegisterExtension(protoreflect.ExtensionType) error } } // Out is the output of the builder. type Out struct { - File pref.FileDescriptor + File protoreflect.FileDescriptor } func (tb Builder) Build() (out Out) { // Replace the resolver with one that resolves dependencies by index, // which is faster and more reliable than relying on the global registry. if tb.File.FileRegistry == nil { - tb.File.FileRegistry = preg.GlobalFiles + tb.File.FileRegistry = protoregistry.GlobalFiles } tb.File.FileRegistry = &resolverByIndex{ goTypes: tb.GoTypes, @@ -133,7 +132,7 @@ func (tb Builder) Build() (out Out) { // Initialize registry if unpopulated. 
if tb.TypeRegistry == nil { - tb.TypeRegistry = preg.GlobalTypes + tb.TypeRegistry = protoregistry.GlobalTypes } fbOut := tb.File.Build() @@ -183,23 +182,23 @@ func (tb Builder) Build() (out Out) { for i := range fbOut.Messages { switch fbOut.Messages[i].Name() { case "FileOptions": - descopts.File = messageGoTypes[i].(pref.ProtoMessage) + descopts.File = messageGoTypes[i].(protoreflect.ProtoMessage) case "EnumOptions": - descopts.Enum = messageGoTypes[i].(pref.ProtoMessage) + descopts.Enum = messageGoTypes[i].(protoreflect.ProtoMessage) case "EnumValueOptions": - descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage) + descopts.EnumValue = messageGoTypes[i].(protoreflect.ProtoMessage) case "MessageOptions": - descopts.Message = messageGoTypes[i].(pref.ProtoMessage) + descopts.Message = messageGoTypes[i].(protoreflect.ProtoMessage) case "FieldOptions": - descopts.Field = messageGoTypes[i].(pref.ProtoMessage) + descopts.Field = messageGoTypes[i].(protoreflect.ProtoMessage) case "OneofOptions": - descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage) + descopts.Oneof = messageGoTypes[i].(protoreflect.ProtoMessage) case "ExtensionRangeOptions": - descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage) + descopts.ExtensionRange = messageGoTypes[i].(protoreflect.ProtoMessage) case "ServiceOptions": - descopts.Service = messageGoTypes[i].(pref.ProtoMessage) + descopts.Service = messageGoTypes[i].(protoreflect.ProtoMessage) case "MethodOptions": - descopts.Method = messageGoTypes[i].(pref.ProtoMessage) + descopts.Method = messageGoTypes[i].(protoreflect.ProtoMessage) } } } @@ -216,11 +215,11 @@ func (tb Builder) Build() (out Out) { const listExtDeps = 2 var goType reflect.Type switch fbOut.Extensions[i].L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) goType = reflect.TypeOf(tb.GoTypes[j]) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) goType = reflect.TypeOf(tb.GoTypes[j]) depIdx++ @@ -242,22 +241,22 @@ func (tb Builder) Build() (out Out) { return out } -var goTypeForPBKind = map[pref.Kind]reflect.Type{ - pref.BoolKind: reflect.TypeOf(bool(false)), - pref.Int32Kind: reflect.TypeOf(int32(0)), - pref.Sint32Kind: reflect.TypeOf(int32(0)), - pref.Sfixed32Kind: reflect.TypeOf(int32(0)), - pref.Int64Kind: reflect.TypeOf(int64(0)), - pref.Sint64Kind: reflect.TypeOf(int64(0)), - pref.Sfixed64Kind: reflect.TypeOf(int64(0)), - pref.Uint32Kind: reflect.TypeOf(uint32(0)), - pref.Fixed32Kind: reflect.TypeOf(uint32(0)), - pref.Uint64Kind: reflect.TypeOf(uint64(0)), - pref.Fixed64Kind: reflect.TypeOf(uint64(0)), - pref.FloatKind: reflect.TypeOf(float32(0)), - pref.DoubleKind: reflect.TypeOf(float64(0)), - pref.StringKind: reflect.TypeOf(string("")), - pref.BytesKind: reflect.TypeOf([]byte(nil)), +var goTypeForPBKind = map[protoreflect.Kind]reflect.Type{ + protoreflect.BoolKind: reflect.TypeOf(bool(false)), + protoreflect.Int32Kind: reflect.TypeOf(int32(0)), + protoreflect.Sint32Kind: reflect.TypeOf(int32(0)), + protoreflect.Sfixed32Kind: reflect.TypeOf(int32(0)), + protoreflect.Int64Kind: reflect.TypeOf(int64(0)), + protoreflect.Sint64Kind: reflect.TypeOf(int64(0)), + protoreflect.Sfixed64Kind: reflect.TypeOf(int64(0)), + protoreflect.Uint32Kind: reflect.TypeOf(uint32(0)), + protoreflect.Fixed32Kind: reflect.TypeOf(uint32(0)), + protoreflect.Uint64Kind: reflect.TypeOf(uint64(0)), + protoreflect.Fixed64Kind: 
reflect.TypeOf(uint64(0)), + protoreflect.FloatKind: reflect.TypeOf(float32(0)), + protoreflect.DoubleKind: reflect.TypeOf(float64(0)), + protoreflect.StringKind: reflect.TypeOf(string("")), + protoreflect.BytesKind: reflect.TypeOf([]byte(nil)), } type depIdxs []int32 @@ -274,13 +273,13 @@ type ( fileRegistry } fileRegistry interface { - FindFileByPath(string) (pref.FileDescriptor, error) - FindDescriptorByName(pref.FullName) (pref.Descriptor, error) - RegisterFile(pref.FileDescriptor) error + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) + RegisterFile(protoreflect.FileDescriptor) error } ) -func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor { +func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.EnumDescriptor { if depIdx := int(r.depIdxs.Get(i, j)); int(depIdx) < len(es)+len(ms) { return &es[depIdx] } else { @@ -288,7 +287,7 @@ func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdes } } -func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor { +func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.MessageDescriptor { if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) { return &ms[depIdx-len(es)] } else { diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index abee5f30e9fd0..a371f98de143f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -12,8 +12,8 @@ import ( "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Export is a zero-length named type that exists only to export a set of @@ -32,11 +32,11 @@ type enum = interface{} // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. -func (Export) EnumOf(e enum) pref.Enum { +func (Export) EnumOf(e enum) protoreflect.Enum { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e default: return legacyWrapEnum(reflect.ValueOf(e)) @@ -45,11 +45,11 @@ func (Export) EnumOf(e enum) pref.Enum { // EnumDescriptorOf returns the protoreflect.EnumDescriptor for e. // It returns nil if e is nil. -func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { +func (Export) EnumDescriptorOf(e enum) protoreflect.EnumDescriptor { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e.Descriptor() default: return LegacyLoadEnumDesc(reflect.TypeOf(e)) @@ -58,11 +58,11 @@ func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { // EnumTypeOf returns the protoreflect.EnumType for e. // It returns nil if e is nil. 
-func (Export) EnumTypeOf(e enum) pref.EnumType { +func (Export) EnumTypeOf(e enum) protoreflect.EnumType { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e.Type() default: return legacyLoadEnumType(reflect.TypeOf(e)) @@ -71,7 +71,7 @@ func (Export) EnumTypeOf(e enum) pref.EnumType { // EnumStringOf returns the enum value as a string, either as the name if // the number is resolvable, or the number formatted as a string. -func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { +func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNumber) string { ev := ed.Values().ByNumber(n) if ev != nil { return string(ev.Name()) @@ -84,7 +84,7 @@ func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { type message = interface{} // legacyMessageWrapper wraps a v2 message as a v1 message. -type legacyMessageWrapper struct{ m pref.ProtoMessage } +type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) } func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) } @@ -92,30 +92,30 @@ func (m legacyMessageWrapper) ProtoMessage() {} // ProtoMessageV1Of converts either a v1 or v2 message to a v1 message. // It returns nil if m is nil. -func (Export) ProtoMessageV1Of(m message) piface.MessageV1 { +func (Export) ProtoMessageV1Of(m message) protoiface.MessageV1 { switch mv := m.(type) { case nil: return nil - case piface.MessageV1: + case protoiface.MessageV1: return mv case unwrapper: return Export{}.ProtoMessageV1Of(mv.protoUnwrap()) - case pref.ProtoMessage: + case protoreflect.ProtoMessage: return legacyMessageWrapper{mv} default: panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) } } -func (Export) protoMessageV2Of(m message) pref.ProtoMessage { +func (Export) protoMessageV2Of(m message) protoreflect.ProtoMessage { switch mv := m.(type) { case nil: return nil - case pref.ProtoMessage: + case protoreflect.ProtoMessage: return mv case legacyMessageWrapper: return mv.m - case piface.MessageV1: + case protoiface.MessageV1: return nil default: panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) @@ -124,7 +124,7 @@ func (Export) protoMessageV2Of(m message) pref.ProtoMessage { // ProtoMessageV2Of converts either a v1 or v2 message to a v2 message. // It returns nil if m is nil. -func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { +func (Export) ProtoMessageV2Of(m message) protoreflect.ProtoMessage { if m == nil { return nil } @@ -136,7 +136,7 @@ func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { // MessageOf returns the protoreflect.Message interface over m. // It returns nil if m is nil. -func (Export) MessageOf(m message) pref.Message { +func (Export) MessageOf(m message) protoreflect.Message { if m == nil { return nil } @@ -148,7 +148,7 @@ func (Export) MessageOf(m message) pref.Message { // MessageDescriptorOf returns the protoreflect.MessageDescriptor for m. // It returns nil if m is nil. -func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { +func (Export) MessageDescriptorOf(m message) protoreflect.MessageDescriptor { if m == nil { return nil } @@ -160,7 +160,7 @@ func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { // MessageTypeOf returns the protoreflect.MessageType for m. // It returns nil if m is nil. 
-func (Export) MessageTypeOf(m message) pref.MessageType { +func (Export) MessageTypeOf(m message) protoreflect.MessageType { if m == nil { return nil } @@ -172,6 +172,6 @@ func (Export) MessageTypeOf(m message) pref.MessageType { // MessageStringOf returns the message value as a string, // which is the message serialized in the protobuf text format. -func (Export) MessageStringOf(m pref.ProtoMessage) string { +func (Export) MessageStringOf(m protoreflect.ProtoMessage) string { return prototext.MarshalOptions{Multiline: false}.Format(m) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index b82341e575cb3..bff041edc946c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -8,18 +8,18 @@ import ( "sync" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) -func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) { +func (mi *MessageInfo) checkInitialized(in protoiface.CheckInitializedInput) (protoiface.CheckInitializedOutput, error) { var p pointer if ms, ok := in.Message.(*messageState); ok { p = ms.pointer() } else { p = in.Message.(*messageReflectWrapper).pointer() } - return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) + return protoiface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) } func (mi *MessageInfo) checkInitializedPointer(p pointer) error { @@ -90,7 +90,7 @@ var ( // needsInitCheck reports whether a message needs to be checked for partial initialization. // // It returns true if the message transitively includes any required or extension fields. -func needsInitCheck(md pref.MessageDescriptor) bool { +func needsInitCheck(md protoreflect.MessageDescriptor) bool { if v, ok := needsInitCheckMap.Load(md); ok { if has, ok := v.(bool); ok { return has @@ -101,7 +101,7 @@ func needsInitCheck(md pref.MessageDescriptor) bool { return needsInitCheckLocked(md) } -func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) { +func needsInitCheckLocked(md protoreflect.MessageDescriptor) (has bool) { if v, ok := needsInitCheckMap.Load(md); ok { // If has is true, we've previously determined that this message // needs init checks. 
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 08d35170b66cc..e74cefdc506fb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type extensionFieldInfo struct { @@ -23,7 +23,7 @@ type extensionFieldInfo struct { var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo -func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { +func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info @@ -32,7 +32,7 @@ func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { } // legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. -func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { +func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { return xi.(*extensionFieldInfo) } @@ -43,7 +43,7 @@ func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { return e } -func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { +func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { var wiretag uint64 if !xd.IsPacked() { wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()]) @@ -59,10 +59,10 @@ func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { // This is true for composite types, where we pass in a message, list, or map to fill in, // and for enums, where we pass in a prototype value to specify the concrete enum type. switch xd.Kind() { - case pref.MessageKind, pref.GroupKind, pref.EnumKind: + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.EnumKind: e.unmarshalNeedsValue = true default: - if xd.Cardinality() == pref.Repeated { + if xd.Cardinality() == protoreflect.Repeated { e.unmarshalNeedsValue = true } } @@ -73,21 +73,21 @@ type lazyExtensionValue struct { atomicOnce uint32 // atomically set if value is valid mu sync.Mutex xi *extensionFieldInfo - value pref.Value + value protoreflect.Value b []byte - fn func() pref.Value + fn func() protoreflect.Value } type ExtensionField struct { - typ pref.ExtensionType + typ protoreflect.ExtensionType // value is either the value of GetValue, // or a *lazyExtensionValue that then returns the value of GetValue. - value pref.Value + value protoreflect.Value lazy *lazyExtensionValue } -func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { +func (f *ExtensionField) appendLazyBytes(xt protoreflect.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { if f.lazy == nil { f.lazy = &lazyExtensionValue{xi: xi} } @@ -97,7 +97,7 @@ func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFie f.lazy.b = append(f.lazy.b, b...) 
} -func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool { +func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { if f.typ == nil { return true } @@ -154,7 +154,7 @@ func (f *ExtensionField) lazyInit() { // Set sets the type and value of the extension field. // This must not be called concurrently. -func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { +func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) { f.typ = t f.value = v f.lazy = nil @@ -162,14 +162,14 @@ func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { // SetLazy sets the type and a value that is to be lazily evaluated upon first use. // This must not be called concurrently. -func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) { +func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { f.typ = t f.lazy = &lazyExtensionValue{fn: fn} } // Value returns the value of the extension field. // This may be called concurrently. -func (f *ExtensionField) Value() pref.Value { +func (f *ExtensionField) Value() protoreflect.Value { if f.lazy != nil { if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { f.lazyInit() @@ -181,7 +181,7 @@ func (f *ExtensionField) Value() pref.Value { // Type returns the type of the extension field. // This may be called concurrently. -func (f ExtensionField) Type() pref.ExtensionType { +func (f ExtensionField) Type() protoreflect.ExtensionType { return f.typ } @@ -193,7 +193,7 @@ func (f ExtensionField) IsSet() bool { // IsLazy reports whether a field is lazily encoded. // It is exported for testing. -func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { +func IsLazy(m protoreflect.Message, fd protoreflect.FieldDescriptor) bool { var mi *MessageInfo var p pointer switch m := m.(type) { @@ -206,7 +206,7 @@ func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { default: return false } - xd, ok := fd.(pref.ExtensionTypeDescriptor) + xd, ok := fd.(protoreflect.ExtensionTypeDescriptor) if !ok { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index cb4b482d166f5..3fadd241e1c44 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -12,9 +12,9 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) type errInvalidUTF8 struct{} @@ -30,7 +30,7 @@ func (errInvalidUTF8) Unwrap() error { return errors.Error } // to the appropriate field-specific function as necessary. // // The unmarshal function is set on each field individually as usual. 
-func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) { +func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si structInfo) { fs := si.oneofsByName[od.Name()] ft := fs.Type oneofFields := make(map[reflect.Type]*coderFieldInfo) @@ -118,13 +118,13 @@ func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structIn } } -func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { +func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { var once sync.Once - var messageType pref.MessageType + var messageType protoreflect.MessageType lazyInit := func() { once.Do(func() { messageName := fd.Message().FullName() - messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) }) } @@ -190,7 +190,7 @@ func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { } } -func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ size: sizeMessageInfo, @@ -280,7 +280,7 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh if n < 0 { return out, errDecode } - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: m.ProtoReflect(), }) @@ -288,27 +288,27 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh return out, err } out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } -func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int { +func sizeMessageValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { m := v.Message().Interface() return sizeMessage(m, tagsize, opts) } -func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendMessageValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { m := v.Message().Interface() return appendMessage(b, m, wiretag, opts) } -func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { +func consumeMessageValue(b []byte, v protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { m := v.Message().Interface() out, err := consumeMessage(b, m, wtyp, opts) return v, out, err } -func isInitMessageValue(v pref.Value) error { +func isInitMessageValue(v protoreflect.Value) error { m := v.Message().Interface() return proto.CheckInitialized(m) } @@ -321,17 +321,17 @@ var coderMessageValue = valueCoderFuncs{ merge: mergeMessageValue, } -func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int { +func sizeGroupValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { m := v.Message().Interface() return sizeGroup(m, tagsize, opts) } -func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendGroupValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { m := v.Message().Interface() return appendGroup(b, m, wiretag, opts) } -func 
consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { +func consumeGroupValue(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { m := v.Message().Interface() out, err := consumeGroup(b, m, num, wtyp, opts) return v, out, err @@ -345,7 +345,7 @@ var coderGroupValue = valueCoderFuncs{ merge: mergeMessageValue, } -func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeGroupFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { num := fd.Number() if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ @@ -424,7 +424,7 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir if n < 0 { return out, errDecode } - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: m.ProtoReflect(), }) @@ -432,11 +432,11 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir return out, err } out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } -func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeMessageSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ size: sizeMessageSliceInfo, @@ -555,7 +555,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir return out, errDecode } mp := reflect.New(goType.Elem()) - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: asMessage(mp).ProtoReflect(), }) @@ -564,7 +564,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir } p.AppendPointerSlice(pointerOfValue(mp)) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } @@ -581,7 +581,7 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { // Slices of messages -func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { +func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { @@ -591,7 +591,7 @@ func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) i return n } -func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() mopts := opts.Options() for i, llen := 0, list.Len(); i < llen; i++ { @@ -608,30 +608,30 @@ func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts ma return b, nil } -func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { +func consumeMessageSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, 
err error) { list := listv.List() if wtyp != protowire.BytesType { - return pref.Value{}, out, errUnknown + return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return pref.Value{}, out, errDecode + return protoreflect.Value{}, out, errDecode } m := list.NewElement() - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: m.Message(), }) if err != nil { - return pref.Value{}, out, err + return protoreflect.Value{}, out, err } list.Append(m) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return listv, out, nil } -func isInitMessageSliceValue(listv pref.Value) error { +func isInitMessageSliceValue(listv protoreflect.Value) error { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() @@ -650,7 +650,7 @@ var coderMessageSliceValue = valueCoderFuncs{ merge: mergeMessageListValue, } -func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { +func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { @@ -660,7 +660,7 @@ func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int return n } -func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendGroupSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() mopts := opts.Options() for i, llen := 0, list.Len(); i < llen; i++ { @@ -676,26 +676,26 @@ func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts mars return b, nil } -func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { +func consumeGroupSliceValue(b []byte, listv protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.StartGroupType { - return pref.Value{}, out, errUnknown + return protoreflect.Value{}, out, errUnknown } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return pref.Value{}, out, errDecode + return protoreflect.Value{}, out, errDecode } m := list.NewElement() - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: m.Message(), }) if err != nil { - return pref.Value{}, out, err + return protoreflect.Value{}, out, err } list.Append(m) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return listv, out, nil } @@ -707,7 +707,7 @@ var coderGroupSliceValue = valueCoderFuncs{ merge: mergeMessageListValue, } -func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { num := fd.Number() if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ @@ -772,7 +772,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire return out, errDecode } mp := reflect.New(goType.Elem()) - o, err := 
opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: asMessage(mp).ProtoReflect(), }) @@ -781,7 +781,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire } p.AppendPointerSlice(pointerOfValue(mp)) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } @@ -822,8 +822,8 @@ func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFie return out, nil } -func asMessage(v reflect.Value) pref.ProtoMessage { - if m, ok := v.Interface().(pref.ProtoMessage); ok { +func asMessage(v reflect.Value) protoreflect.ProtoMessage { + if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { return m } return legacyWrapMessage(v).Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index c1245fef48765..111b9d16f993b 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type mapInfo struct { @@ -19,12 +19,12 @@ type mapInfo struct { valWiretag uint64 keyFuncs valueCoderFuncs valFuncs valueCoderFuncs - keyZero pref.Value - keyKind pref.Kind + keyZero protoreflect.Value + keyKind protoreflect.Kind conv *mapConverter } -func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { +func encoderFuncsForMap(fd protoreflect.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { // TODO: Consider generating specialized map coders. 
keyField := fd.MapKey() valField := fd.MapValue() @@ -44,7 +44,7 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage keyKind: keyField.Kind(), conv: conv, } - if valField.Kind() == pref.MessageKind { + if valField.Kind() == protoreflect.MessageKind { valueMessage = getMessageInfo(ft.Elem()) } @@ -68,9 +68,9 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage }, } switch valField.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: funcs.merge = mergeMapOfMessage - case pref.BytesKind: + case protoreflect.BytesKind: funcs.merge = mergeMapOfBytes default: funcs.merge = mergeMap @@ -135,7 +135,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo err := errUnknown switch num { case genid.MapEntry_Key_field_number: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) if err != nil { @@ -144,7 +144,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo key = v n = o.n case genid.MapEntry_Value_field_number: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) if err != nil { @@ -192,7 +192,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi err := errUnknown switch num { case 1: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) if err != nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index cd40527ff6462..6b2fdbb739a23 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -12,15 +12,15 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // coderMessageInfo contains per-message information used by the fast-path functions. // This is a different type from MessageInfo to keep MessageInfo as general-purpose as // possible. 
type coderMessageInfo struct { - methods piface.Methods + methods protoiface.Methods orderedCoderFields []*coderFieldInfo denseCoderFields []*coderFieldInfo @@ -38,13 +38,13 @@ type coderFieldInfo struct { funcs pointerCoderFuncs // fast-path per-field functions mi *MessageInfo // field's message ft reflect.Type - validation validationInfo // information used by message validation - num pref.FieldNumber // field number - offset offset // struct field offset - wiretag uint64 // field tag (number + wire type) - tagsize int // size of the varint-encoded tag - isPointer bool // true if IsNil may be called on the struct field - isRequired bool // true if field is required + validation validationInfo // information used by message validation + num protoreflect.FieldNumber // field number + offset offset // struct field offset + wiretag uint64 // field tag (number + wire type) + tagsize int // size of the varint-encoded tag + isPointer bool // true if IsNil may be called on the struct field + isRequired bool // true if field is required } func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { @@ -125,8 +125,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { funcs: funcs, mi: childMessage, validation: newFieldValidationInfo(mi, si, fd, ft), - isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(), - isRequired: fd.Cardinality() == pref.Required, + isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(), + isRequired: fd.Cardinality() == protoreflect.Required, } mi.orderedCoderFields = append(mi.orderedCoderFields, cf) mi.coderFields[cf.num] = cf @@ -149,7 +149,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num }) - var maxDense pref.FieldNumber + var maxDense protoreflect.FieldNumber for _, cf := range mi.orderedCoderFields { if cf.num >= 16 && cf.num >= 2*maxDense { break @@ -175,12 +175,12 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.needsInitCheck = needsInitCheck(mi.Desc) if mi.methods.Marshal == nil && mi.methods.Size == nil { - mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Flags |= protoiface.SupportMarshalDeterministic mi.methods.Marshal = mi.marshal mi.methods.Size = mi.size } if mi.methods.Unmarshal == nil { - mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Flags |= protoiface.SupportUnmarshalDiscardUnknown mi.methods.Unmarshal = mi.unmarshal } if mi.methods.CheckInitialized == nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index e89971238879f..576dcf3aac50a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // pointerCoderFuncs is a set of pointer encoding functions. @@ -25,83 +25,83 @@ type pointerCoderFuncs struct { // valueCoderFuncs is a set of protoreflect.Value encoding functions. 
type valueCoderFuncs struct { - size func(v pref.Value, tagsize int, opts marshalOptions) int - marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) - unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) - isInit func(v pref.Value) error - merge func(dst, src pref.Value, opts mergeOptions) pref.Value + size func(v protoreflect.Value, tagsize int, opts marshalOptions) int + marshal func(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) + isInit func(v protoreflect.Value) error + merge func(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value } // fieldCoder returns pointer functions for a field, used for operating on // struct fields. -func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { +func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { switch { case fd.IsMap(): return encoderFuncsForMap(fd, ft) - case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): // Repeated fields (not packed). if ft.Kind() != reflect.Slice { break } ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolSlice } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumSlice } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32Slice } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32Slice } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32Slice } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64Slice } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64Slice } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64Slice } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32Slice } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32Slice } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatSlice } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64Slice } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64Slice } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoubleSlice } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringSliceValidateUTF8 } @@ -114,19 +114,19 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesSlice } - case pref.BytesKind: + case protoreflect.BytesKind: 
if ft.Kind() == reflect.String { return nil, coderStringSlice } if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesSlice } - case pref.MessageKind: + case protoreflect.MessageKind: return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft) - case pref.GroupKind: + case protoreflect.GroupKind: return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft) } - case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): // Packed repeated fields. // // Only repeated fields of primitive numeric types @@ -136,128 +136,128 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer } ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolPackedSlice } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumPackedSlice } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32PackedSlice } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32PackedSlice } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32PackedSlice } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64PackedSlice } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64PackedSlice } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64PackedSlice } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32PackedSlice } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32PackedSlice } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatPackedSlice } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64PackedSlice } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64PackedSlice } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoublePackedSlice } } - case fd.Kind() == pref.MessageKind: + case fd.Kind() == protoreflect.MessageKind: return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) - case fd.Kind() == pref.GroupKind: + case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil: + case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. 
switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolNoZero } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumNoZero } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32NoZero } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32NoZero } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32NoZero } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64NoZero } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64NoZero } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64NoZero } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32NoZero } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32NoZero } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatNoZero } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64NoZero } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64NoZero } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoubleNoZero } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringNoZeroValidateUTF8 } @@ -270,7 +270,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesNoZero } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringNoZero } @@ -281,133 +281,133 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer case ft.Kind() == reflect.Ptr: ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolPtr } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumPtr } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32Ptr } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32Ptr } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32Ptr } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64Ptr } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64Ptr } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64Ptr } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32Ptr } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32Ptr } - case pref.FloatKind: + case 
protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatPtr } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64Ptr } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64Ptr } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoublePtr } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringPtrValidateUTF8 } if ft.Kind() == reflect.String { return nil, coderStringPtr } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringPtr } } default: switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBool } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnum } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32 } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32 } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32 } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64 } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64 } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64 } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32 } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32 } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloat } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64 } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64 } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDouble } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringValidateUTF8 } @@ -420,7 +420,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytes } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderString } @@ -434,122 +434,122 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer // encoderFuncsForValue returns value functions for a field, used for // extension values and map encoding. 
-func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs { +func encoderFuncsForValue(fd protoreflect.FieldDescriptor) valueCoderFuncs { switch { - case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolSliceValue - case pref.EnumKind: + case protoreflect.EnumKind: return coderEnumSliceValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32SliceValue - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32SliceValue - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32SliceValue - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64SliceValue - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64SliceValue - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64SliceValue - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32SliceValue - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32SliceValue - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatSliceValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64SliceValue - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64SliceValue - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoubleSliceValue - case pref.StringKind: + case protoreflect.StringKind: // We don't have a UTF-8 validating coder for repeated string fields. // Value coders are used for extensions and maps. // Extensions are never proto3, and maps never contain lists. return coderStringSliceValue - case pref.BytesKind: + case protoreflect.BytesKind: return coderBytesSliceValue - case pref.MessageKind: + case protoreflect.MessageKind: return coderMessageSliceValue - case pref.GroupKind: + case protoreflect.GroupKind: return coderGroupSliceValue } - case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolPackedSliceValue - case pref.EnumKind: + case protoreflect.EnumKind: return coderEnumPackedSliceValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32PackedSliceValue - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32PackedSliceValue - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32PackedSliceValue - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64PackedSliceValue - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64PackedSliceValue - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64PackedSliceValue - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32PackedSliceValue - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32PackedSliceValue - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatPackedSliceValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64PackedSliceValue - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64PackedSliceValue - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoublePackedSliceValue } default: switch fd.Kind() { default: - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolValue - case 
pref.EnumKind: + case protoreflect.EnumKind: return coderEnumValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32Value - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32Value - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32Value - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64Value - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64Value - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64Value - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32Value - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32Value - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64Value - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64Value - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoubleValue - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { return coderStringValueValidateUTF8 } return coderStringValue - case pref.BytesKind: + case protoreflect.BytesKind: return coderBytesValue - case pref.MessageKind: + case protoreflect.MessageKind: return coderMessageValue - case pref.GroupKind: + case protoreflect.GroupKind: return coderGroupValue } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index acd61bb50b2ca..11a6128ba56bd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -8,7 +8,7 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // unwrapper unwraps the value to the underlying value. @@ -20,13 +20,13 @@ type unwrapper interface { // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. type Converter interface { // PBValueOf converts a reflect.Value to a protoreflect.Value. - PBValueOf(reflect.Value) pref.Value + PBValueOf(reflect.Value) protoreflect.Value // GoValueOf converts a protoreflect.Value to a reflect.Value. - GoValueOf(pref.Value) reflect.Value + GoValueOf(protoreflect.Value) reflect.Value // IsValidPB returns whether a protoreflect.Value is compatible with this type. - IsValidPB(pref.Value) bool + IsValidPB(protoreflect.Value) bool // IsValidGo returns whether a reflect.Value is compatible with this type. IsValidGo(reflect.Value) bool @@ -34,12 +34,12 @@ type Converter interface { // New returns a new field value. // For scalars, it returns the default value of the field. // For composite types, it returns a new mutable value. - New() pref.Value + New() protoreflect.Value // Zero returns a new field value. // For scalars, it returns the default value of the field. // For composite types, it returns an immutable, empty value. - Zero() pref.Value + Zero() protoreflect.Value } // NewConverter matches a Go type with a protobuf field and returns a Converter @@ -50,7 +50,7 @@ type Converter interface { // This matcher deliberately supports a wider range of Go types than what // protoc-gen-go historically generated to be able to automatically wrap some // v1 messages generated by other forks of protoc-gen-go. 
-func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { +func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { switch { case fd.IsList(): return newListConverter(t, fd) @@ -76,68 +76,68 @@ var ( ) var ( - boolZero = pref.ValueOfBool(false) - int32Zero = pref.ValueOfInt32(0) - int64Zero = pref.ValueOfInt64(0) - uint32Zero = pref.ValueOfUint32(0) - uint64Zero = pref.ValueOfUint64(0) - float32Zero = pref.ValueOfFloat32(0) - float64Zero = pref.ValueOfFloat64(0) - stringZero = pref.ValueOfString("") - bytesZero = pref.ValueOfBytes(nil) + boolZero = protoreflect.ValueOfBool(false) + int32Zero = protoreflect.ValueOfInt32(0) + int64Zero = protoreflect.ValueOfInt64(0) + uint32Zero = protoreflect.ValueOfUint32(0) + uint64Zero = protoreflect.ValueOfUint64(0) + float32Zero = protoreflect.ValueOfFloat32(0) + float64Zero = protoreflect.ValueOfFloat64(0) + stringZero = protoreflect.ValueOfString("") + bytesZero = protoreflect.ValueOfBytes(nil) ) -func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { - defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value { - if fd.Cardinality() == pref.Repeated { +func newSingularConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { + defVal := func(fd protoreflect.FieldDescriptor, zero protoreflect.Value) protoreflect.Value { + if fd.Cardinality() == protoreflect.Repeated { // Default isn't defined for repeated fields. return zero } return fd.Default() } switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if t.Kind() == reflect.Bool { return &boolConverter{t, defVal(fd, boolZero)} } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if t.Kind() == reflect.Int32 { return &int32Converter{t, defVal(fd, int32Zero)} } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if t.Kind() == reflect.Int64 { return &int64Converter{t, defVal(fd, int64Zero)} } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if t.Kind() == reflect.Uint32 { return &uint32Converter{t, defVal(fd, uint32Zero)} } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if t.Kind() == reflect.Uint64 { return &uint64Converter{t, defVal(fd, uint64Zero)} } - case pref.FloatKind: + case protoreflect.FloatKind: if t.Kind() == reflect.Float32 { return &float32Converter{t, defVal(fd, float32Zero)} } - case pref.DoubleKind: + case protoreflect.DoubleKind: if t.Kind() == reflect.Float64 { return &float64Converter{t, defVal(fd, float64Zero)} } - case pref.StringKind: + case protoreflect.StringKind: if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { return &stringConverter{t, defVal(fd, stringZero)} } - case pref.BytesKind: + case protoreflect.BytesKind: if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { return &bytesConverter{t, defVal(fd, bytesZero)} } - case pref.EnumKind: + case protoreflect.EnumKind: // Handle enums, which must be a named int32 type. 
if t.Kind() == reflect.Int32 { return newEnumConverter(t, fd) } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return newMessageConverter(t) } panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) @@ -145,184 +145,184 @@ func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { type boolConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *boolConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfBool(v.Bool()) + return protoreflect.ValueOfBool(v.Bool()) } -func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *boolConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Bool()).Convert(c.goType) } -func (c *boolConverter) IsValidPB(v pref.Value) bool { +func (c *boolConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(bool) return ok } func (c *boolConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *boolConverter) New() pref.Value { return c.def } -func (c *boolConverter) Zero() pref.Value { return c.def } +func (c *boolConverter) New() protoreflect.Value { return c.def } +func (c *boolConverter) Zero() protoreflect.Value { return c.def } type int32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *int32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfInt32(int32(v.Int())) + return protoreflect.ValueOfInt32(int32(v.Int())) } -func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *int32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(int32(v.Int())).Convert(c.goType) } -func (c *int32Converter) IsValidPB(v pref.Value) bool { +func (c *int32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(int32) return ok } func (c *int32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *int32Converter) New() pref.Value { return c.def } -func (c *int32Converter) Zero() pref.Value { return c.def } +func (c *int32Converter) New() protoreflect.Value { return c.def } +func (c *int32Converter) Zero() protoreflect.Value { return c.def } type int64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *int64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfInt64(int64(v.Int())) + return protoreflect.ValueOfInt64(int64(v.Int())) } -func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *int64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(int64(v.Int())).Convert(c.goType) } -func (c *int64Converter) IsValidPB(v pref.Value) bool { +func (c *int64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(int64) return ok } func (c *int64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c 
*int64Converter) New() pref.Value { return c.def } -func (c *int64Converter) Zero() pref.Value { return c.def } +func (c *int64Converter) New() protoreflect.Value { return c.def } +func (c *int64Converter) Zero() protoreflect.Value { return c.def } type uint32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *uint32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfUint32(uint32(v.Uint())) + return protoreflect.ValueOfUint32(uint32(v.Uint())) } -func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *uint32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType) } -func (c *uint32Converter) IsValidPB(v pref.Value) bool { +func (c *uint32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(uint32) return ok } func (c *uint32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *uint32Converter) New() pref.Value { return c.def } -func (c *uint32Converter) Zero() pref.Value { return c.def } +func (c *uint32Converter) New() protoreflect.Value { return c.def } +func (c *uint32Converter) Zero() protoreflect.Value { return c.def } type uint64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *uint64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfUint64(uint64(v.Uint())) + return protoreflect.ValueOfUint64(uint64(v.Uint())) } -func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *uint64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType) } -func (c *uint64Converter) IsValidPB(v pref.Value) bool { +func (c *uint64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(uint64) return ok } func (c *uint64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *uint64Converter) New() pref.Value { return c.def } -func (c *uint64Converter) Zero() pref.Value { return c.def } +func (c *uint64Converter) New() protoreflect.Value { return c.def } +func (c *uint64Converter) Zero() protoreflect.Value { return c.def } type float32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *float32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfFloat32(float32(v.Float())) + return protoreflect.ValueOfFloat32(float32(v.Float())) } -func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *float32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(float32(v.Float())).Convert(c.goType) } -func (c *float32Converter) IsValidPB(v pref.Value) bool { +func (c *float32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(float32) return ok } func (c *float32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c 
*float32Converter) New() pref.Value { return c.def } -func (c *float32Converter) Zero() pref.Value { return c.def } +func (c *float32Converter) New() protoreflect.Value { return c.def } +func (c *float32Converter) Zero() protoreflect.Value { return c.def } type float64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *float64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfFloat64(float64(v.Float())) + return protoreflect.ValueOfFloat64(float64(v.Float())) } -func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *float64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(float64(v.Float())).Convert(c.goType) } -func (c *float64Converter) IsValidPB(v pref.Value) bool { +func (c *float64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(float64) return ok } func (c *float64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *float64Converter) New() pref.Value { return c.def } -func (c *float64Converter) Zero() pref.Value { return c.def } +func (c *float64Converter) New() protoreflect.Value { return c.def } +func (c *float64Converter) Zero() protoreflect.Value { return c.def } type stringConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfString(v.Convert(stringType).String()) + return protoreflect.ValueOfString(v.Convert(stringType).String()) } -func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { // pref.Value.String never panics, so we go through an interface // conversion here to check the type. 
s := v.Interface().(string) @@ -331,71 +331,71 @@ func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { } return reflect.ValueOf(s).Convert(c.goType) } -func (c *stringConverter) IsValidPB(v pref.Value) bool { +func (c *stringConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(string) return ok } func (c *stringConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *stringConverter) New() pref.Value { return c.def } -func (c *stringConverter) Zero() pref.Value { return c.def } +func (c *stringConverter) New() protoreflect.Value { return c.def } +func (c *stringConverter) Zero() protoreflect.Value { return c.def } type bytesConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *bytesConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } if c.goType.Kind() == reflect.String && v.Len() == 0 { - return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil) + return protoreflect.ValueOfBytes(nil) // ensure empty string is []byte(nil) } - return pref.ValueOfBytes(v.Convert(bytesType).Bytes()) + return protoreflect.ValueOfBytes(v.Convert(bytesType).Bytes()) } -func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *bytesConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Bytes()).Convert(c.goType) } -func (c *bytesConverter) IsValidPB(v pref.Value) bool { +func (c *bytesConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().([]byte) return ok } func (c *bytesConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *bytesConverter) New() pref.Value { return c.def } -func (c *bytesConverter) Zero() pref.Value { return c.def } +func (c *bytesConverter) New() protoreflect.Value { return c.def } +func (c *bytesConverter) Zero() protoreflect.Value { return c.def } type enumConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter { - var def pref.Value - if fd.Cardinality() == pref.Repeated { - def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number()) +func newEnumConverter(goType reflect.Type, fd protoreflect.FieldDescriptor) Converter { + var def protoreflect.Value + if fd.Cardinality() == protoreflect.Repeated { + def = protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number()) } else { def = fd.Default() } return &enumConverter{goType, def} } -func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *enumConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfEnum(pref.EnumNumber(v.Int())) + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v.Int())) } -func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *enumConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Enum()).Convert(c.goType) } -func (c *enumConverter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().(pref.EnumNumber) +func (c *enumConverter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(protoreflect.EnumNumber) return ok } @@ -403,11 +403,11 @@ func (c *enumConverter) IsValidGo(v reflect.Value) bool { return 
v.IsValid() && v.Type() == c.goType } -func (c *enumConverter) New() pref.Value { +func (c *enumConverter) New() protoreflect.Value { return c.def } -func (c *enumConverter) Zero() pref.Value { +func (c *enumConverter) Zero() protoreflect.Value { return c.def } @@ -419,7 +419,7 @@ func newMessageConverter(goType reflect.Type) Converter { return &messageConverter{goType} } -func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *messageConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } @@ -430,13 +430,13 @@ func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { v = reflect.Zero(reflect.PtrTo(v.Type())) } } - if m, ok := v.Interface().(pref.ProtoMessage); ok { - return pref.ValueOfMessage(m.ProtoReflect()) + if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { + return protoreflect.ValueOfMessage(m.ProtoReflect()) } - return pref.ValueOfMessage(legacyWrapMessage(v)) + return protoreflect.ValueOfMessage(legacyWrapMessage(v)) } -func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *messageConverter) GoValueOf(v protoreflect.Value) reflect.Value { m := v.Message() var rv reflect.Value if u, ok := m.(unwrapper); ok { @@ -460,7 +460,7 @@ func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { return rv } -func (c *messageConverter) IsValidPB(v pref.Value) bool { +func (c *messageConverter) IsValidPB(v protoreflect.Value) bool { m := v.Message() var rv reflect.Value if u, ok := m.(unwrapper); ok { @@ -478,14 +478,14 @@ func (c *messageConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *messageConverter) New() pref.Value { +func (c *messageConverter) New() protoreflect.Value { if c.isNonPointer() { return c.PBValueOf(reflect.New(c.goType).Elem()) } return c.PBValueOf(reflect.New(c.goType.Elem())) } -func (c *messageConverter) Zero() pref.Value { +func (c *messageConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index 6fccab520e59a..f89136516f96d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -8,10 +8,10 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) -func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { +func newListConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { switch { case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice: return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)} @@ -26,16 +26,16 @@ type listConverter struct { c Converter } -func (c *listConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *listConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } pv := reflect.New(c.goType) pv.Elem().Set(v) - return pref.ValueOfList(&listReflect{pv, c.c}) + return protoreflect.ValueOfList(&listReflect{pv, c.c}) } -func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *listConverter) GoValueOf(v protoreflect.Value) reflect.Value { rv := v.List().(*listReflect).v if rv.IsNil() { return reflect.Zero(c.goType) @@ 
-43,7 +43,7 @@ func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { return rv.Elem() } -func (c *listConverter) IsValidPB(v pref.Value) bool { +func (c *listConverter) IsValidPB(v protoreflect.Value) bool { list, ok := v.Interface().(*listReflect) if !ok { return false @@ -55,12 +55,12 @@ func (c *listConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *listConverter) New() pref.Value { - return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) +func (c *listConverter) New() protoreflect.Value { + return protoreflect.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) } -func (c *listConverter) Zero() pref.Value { - return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) +func (c *listConverter) Zero() protoreflect.Value { + return protoreflect.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) } type listPtrConverter struct { @@ -68,18 +68,18 @@ type listPtrConverter struct { c Converter } -func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *listPtrConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfList(&listReflect{v, c.c}) + return protoreflect.ValueOfList(&listReflect{v, c.c}) } -func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *listPtrConverter) GoValueOf(v protoreflect.Value) reflect.Value { return v.List().(*listReflect).v } -func (c *listPtrConverter) IsValidPB(v pref.Value) bool { +func (c *listPtrConverter) IsValidPB(v protoreflect.Value) bool { list, ok := v.Interface().(*listReflect) if !ok { return false @@ -91,11 +91,11 @@ func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *listPtrConverter) New() pref.Value { +func (c *listPtrConverter) New() protoreflect.Value { return c.PBValueOf(reflect.New(c.goType.Elem())) } -func (c *listPtrConverter) Zero() pref.Value { +func (c *listPtrConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } @@ -110,16 +110,16 @@ func (ls *listReflect) Len() int { } return ls.v.Elem().Len() } -func (ls *listReflect) Get(i int) pref.Value { +func (ls *listReflect) Get(i int) protoreflect.Value { return ls.conv.PBValueOf(ls.v.Elem().Index(i)) } -func (ls *listReflect) Set(i int, v pref.Value) { +func (ls *listReflect) Set(i int, v protoreflect.Value) { ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v)) } -func (ls *listReflect) Append(v pref.Value) { +func (ls *listReflect) Append(v protoreflect.Value) { ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v))) } -func (ls *listReflect) AppendMutable() pref.Value { +func (ls *listReflect) AppendMutable() protoreflect.Value { if _, ok := ls.conv.(*messageConverter); !ok { panic("invalid AppendMutable on list with non-message type") } @@ -130,7 +130,7 @@ func (ls *listReflect) AppendMutable() pref.Value { func (ls *listReflect) Truncate(i int) { ls.v.Elem().Set(ls.v.Elem().Slice(0, i)) } -func (ls *listReflect) NewElement() pref.Value { +func (ls *listReflect) NewElement() protoreflect.Value { return ls.conv.New() } func (ls *listReflect) IsValid() bool { diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index de06b2593f89a..f30b0a0576de3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ 
b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -8,7 +8,7 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type mapConverter struct { @@ -16,7 +16,7 @@ type mapConverter struct { keyConv, valConv Converter } -func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { +func newMapConverter(t reflect.Type, fd protoreflect.FieldDescriptor) *mapConverter { if t.Kind() != reflect.Map { panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) } @@ -27,18 +27,18 @@ func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { } } -func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *mapConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) + return protoreflect.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) } -func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *mapConverter) GoValueOf(v protoreflect.Value) reflect.Value { return v.Map().(*mapReflect).v } -func (c *mapConverter) IsValidPB(v pref.Value) bool { +func (c *mapConverter) IsValidPB(v protoreflect.Value) bool { mapv, ok := v.Interface().(*mapReflect) if !ok { return false @@ -50,11 +50,11 @@ func (c *mapConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *mapConverter) New() pref.Value { +func (c *mapConverter) New() protoreflect.Value { return c.PBValueOf(reflect.MakeMap(c.goType)) } -func (c *mapConverter) Zero() pref.Value { +func (c *mapConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } @@ -67,29 +67,29 @@ type mapReflect struct { func (ms *mapReflect) Len() int { return ms.v.Len() } -func (ms *mapReflect) Has(k pref.MapKey) bool { +func (ms *mapReflect) Has(k protoreflect.MapKey) bool { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.v.MapIndex(rk) return rv.IsValid() } -func (ms *mapReflect) Get(k pref.MapKey) pref.Value { +func (ms *mapReflect) Get(k protoreflect.MapKey) protoreflect.Value { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.v.MapIndex(rk) if !rv.IsValid() { - return pref.Value{} + return protoreflect.Value{} } return ms.valConv.PBValueOf(rv) } -func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) { +func (ms *mapReflect) Set(k protoreflect.MapKey, v protoreflect.Value) { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.valConv.GoValueOf(v) ms.v.SetMapIndex(rk, rv) } -func (ms *mapReflect) Clear(k pref.MapKey) { +func (ms *mapReflect) Clear(k protoreflect.MapKey) { rk := ms.keyConv.GoValueOf(k.Value()) ms.v.SetMapIndex(rk, reflect.Value{}) } -func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { +func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { if _, ok := ms.valConv.(*messageConverter); !ok { panic("invalid Mutable on map with non-message value type") } @@ -100,7 +100,7 @@ func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { } return v } -func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { +func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { iter := mapRange(ms.v) for iter.Next() { k := ms.keyConv.PBValueOf(iter.Key()).MapKey() @@ -110,7 +110,7 @@ func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { } } } -func (ms *mapReflect) NewValue() pref.Value { +func (ms *mapReflect) 
NewValue() protoreflect.Value { return ms.valConv.New() } func (ms *mapReflect) IsValid() bool { diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index c65b0325c17e4..cda0520c275cb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -12,9 +12,8 @@ import ( "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoiface" - piface "google.golang.org/protobuf/runtime/protoiface" ) var errDecode = errors.New("cannot parse invalid wire-format data") @@ -38,14 +37,16 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions { } } -func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 } +func (o unmarshalOptions) DiscardUnknown() bool { + return o.flags&protoiface.UnmarshalDiscardUnknown != 0 +} func (o unmarshalOptions) IsDefault() bool { - return o.flags == 0 && o.resolver == preg.GlobalTypes + return o.flags == 0 && o.resolver == protoregistry.GlobalTypes } var lazyUnmarshalOptions = unmarshalOptions{ - resolver: preg.GlobalTypes, + resolver: protoregistry.GlobalTypes, depth: protowire.DefaultRecursionLimit, } @@ -55,7 +56,7 @@ type unmarshalOutput struct { } // unmarshal is protoreflect.Methods.Unmarshal. -func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { +func (mi *MessageInfo) unmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { var p pointer if ms, ok := in.Message.(*messageState); ok { p = ms.pointer() @@ -67,11 +68,11 @@ func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutp resolver: in.Resolver, depth: in.Depth, }) - var flags piface.UnmarshalOutputFlags + var flags protoiface.UnmarshalOutputFlags if out.initialized { - flags |= piface.UnmarshalInitialized + flags |= protoiface.UnmarshalInitialized } - return piface.UnmarshalOutput{ + return protoiface.UnmarshalOutput{ Flags: flags, }, err } @@ -210,7 +211,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p var err error xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num) if err != nil { - if err == preg.NotFound { + if err == protoregistry.NotFound { return out, errUnknown } return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err) diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go index 8c1eab4bfd869..5f3ef5ad732f6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go @@ -7,15 +7,15 @@ package impl import ( "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type EnumInfo struct { GoReflectType reflect.Type // int32 kind - Desc pref.EnumDescriptor + Desc protoreflect.EnumDescriptor } -func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum { - return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum) +func (t *EnumInfo) New(n protoreflect.EnumNumber) protoreflect.Enum { + return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(protoreflect.Enum) } -func (t *EnumInfo) Descriptor() 
pref.EnumDescriptor { return t.Desc } +func (t *EnumInfo) Descriptor() protoreflect.EnumDescriptor { return t.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index e904fd993657c..cb25b0bae1d71 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -9,8 +9,8 @@ import ( "sync" "sync/atomic" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // ExtensionInfo implements ExtensionType. @@ -45,7 +45,7 @@ type ExtensionInfo struct { // since the message may no longer implement the MessageV1 interface. // // Deprecated: Use the ExtendedType method instead. - ExtendedType piface.MessageV1 + ExtendedType protoiface.MessageV1 // ExtensionType is the zero value of the extension type. // @@ -83,31 +83,31 @@ const ( extensionInfoFullInit = 2 ) -func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) { +func InitExtensionInfo(xi *ExtensionInfo, xd protoreflect.ExtensionDescriptor, goType reflect.Type) { xi.goType = goType xi.desc = extensionTypeDescriptor{xd, xi} xi.init = extensionInfoDescInit } -func (xi *ExtensionInfo) New() pref.Value { +func (xi *ExtensionInfo) New() protoreflect.Value { return xi.lazyInit().New() } -func (xi *ExtensionInfo) Zero() pref.Value { +func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value { +func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { return xi.lazyInit().GoValueOf(v).Interface() } -func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool { +func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor { +func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { if atomic.LoadUint32(&xi.init) < extensionInfoDescInit { xi.lazyInitSlow() } @@ -144,13 +144,13 @@ func (xi *ExtensionInfo) lazyInitSlow() { } type extensionTypeDescriptor struct { - pref.ExtensionDescriptor + protoreflect.ExtensionDescriptor xi *ExtensionInfo } -func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType { +func (xtd *extensionTypeDescriptor) Type() protoreflect.ExtensionType { return xtd.xi } -func (xtd *extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor { +func (xtd *extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor { return xtd.ExtensionDescriptor } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index f7d7ffb51039e..c2a803bb2f929 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -13,13 +13,12 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" 
- pref "google.golang.org/protobuf/reflect/protoreflect" ) // legacyEnumName returns the name of enums used in legacy code. // It is neither the protobuf full name nor the qualified Go name, // but rather an odd hybrid of both. -func legacyEnumName(ed pref.EnumDescriptor) string { +func legacyEnumName(ed protoreflect.EnumDescriptor) string { var protoPkg string enumName := string(ed.FullName()) if fd := ed.ParentFile(); fd != nil { @@ -34,68 +33,68 @@ func legacyEnumName(ed pref.EnumDescriptor) string { // legacyWrapEnum wraps v as a protoreflect.Enum, // where v must be a int32 kind and not implement the v2 API already. -func legacyWrapEnum(v reflect.Value) pref.Enum { +func legacyWrapEnum(v reflect.Value) protoreflect.Enum { et := legacyLoadEnumType(v.Type()) - return et.New(pref.EnumNumber(v.Int())) + return et.New(protoreflect.EnumNumber(v.Int())) } var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType // legacyLoadEnumType dynamically loads a protoreflect.EnumType for t, // where t must be an int32 kind and not implement the v2 API already. -func legacyLoadEnumType(t reflect.Type) pref.EnumType { +func legacyLoadEnumType(t reflect.Type) protoreflect.EnumType { // Fast-path: check if a EnumType is cached for this concrete type. if et, ok := legacyEnumTypeCache.Load(t); ok { - return et.(pref.EnumType) + return et.(protoreflect.EnumType) } // Slow-path: derive enum descriptor and initialize EnumType. - var et pref.EnumType + var et protoreflect.EnumType ed := LegacyLoadEnumDesc(t) et = &legacyEnumType{ desc: ed, goType: t, } if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok { - return et.(pref.EnumType) + return et.(protoreflect.EnumType) } return et } type legacyEnumType struct { - desc pref.EnumDescriptor + desc protoreflect.EnumDescriptor goType reflect.Type m sync.Map // map[protoreflect.EnumNumber]proto.Enum } -func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum { +func (t *legacyEnumType) New(n protoreflect.EnumNumber) protoreflect.Enum { if e, ok := t.m.Load(n); ok { - return e.(pref.Enum) + return e.(protoreflect.Enum) } e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType} t.m.Store(n, e) return e } -func (t *legacyEnumType) Descriptor() pref.EnumDescriptor { +func (t *legacyEnumType) Descriptor() protoreflect.EnumDescriptor { return t.desc } type legacyEnumWrapper struct { - num pref.EnumNumber - pbTyp pref.EnumType + num protoreflect.EnumNumber + pbTyp protoreflect.EnumType goTyp reflect.Type } -func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor { +func (e *legacyEnumWrapper) Descriptor() protoreflect.EnumDescriptor { return e.pbTyp.Descriptor() } -func (e *legacyEnumWrapper) Type() pref.EnumType { +func (e *legacyEnumWrapper) Type() protoreflect.EnumType { return e.pbTyp } -func (e *legacyEnumWrapper) Number() pref.EnumNumber { +func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { return e.num } -func (e *legacyEnumWrapper) ProtoReflect() pref.Enum { +func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } func (e *legacyEnumWrapper) protoUnwrap() interface{} { @@ -105,8 +104,8 @@ func (e *legacyEnumWrapper) protoUnwrap() interface{} { } var ( - _ pref.Enum = (*legacyEnumWrapper)(nil) - _ unwrapper = (*legacyEnumWrapper)(nil) + _ protoreflect.Enum = (*legacyEnumWrapper)(nil) + _ unwrapper = (*legacyEnumWrapper)(nil) ) var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor @@ -115,15 +114,15 @@ var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor 
// which must be an int32 kind and not implement the v2 API already. // // This is exported for testing purposes. -func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { +func LegacyLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { // Fast-path: check if an EnumDescriptor is cached for this concrete type. if ed, ok := legacyEnumDescCache.Load(t); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } // Slow-path: initialize EnumDescriptor from the raw descriptor. ev := reflect.Zero(t).Interface() - if _, ok := ev.(pref.Enum); ok { + if _, ok := ev.(protoreflect.Enum); ok { panic(fmt.Sprintf("%v already implements proto.Enum", t)) } edV1, ok := ev.(enumV1) @@ -132,7 +131,7 @@ func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { } b, idxs := edV1.EnumDescriptor() - var ed pref.EnumDescriptor + var ed protoreflect.EnumDescriptor if len(idxs) == 1 { ed = legacyLoadFileDesc(b).Enums().Get(idxs[0]) } else { @@ -158,10 +157,10 @@ var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescript // We are unable to use the global enum registry since it is // unfortunately keyed by the protobuf full name, which we also do not know. // Thus, this produces some bogus enum descriptor based on the Go type name. -func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { +func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { // Fast-path: check if an EnumDescriptor is cached for this concrete type. if ed, ok := aberrantEnumDescCache.Load(t); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } // Slow-path: construct a bogus, but unique EnumDescriptor. @@ -182,7 +181,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { // An exhaustive query is clearly impractical, but can be best-effort. if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } return ed } @@ -192,7 +191,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { // It should be sufficiently unique within a program. // // This is exported for testing purposes. -func AberrantDeriveFullName(t reflect.Type) pref.FullName { +func AberrantDeriveFullName(t reflect.Type) protoreflect.FullName { sanitize := func(r rune) rune { switch { case r == '/': @@ -215,5 +214,5 @@ func AberrantDeriveFullName(t reflect.Type) pref.FullName { ss[i] = "x" + s } } - return pref.FullName(strings.Join(ss, ".")) + return protoreflect.FullName(strings.Join(ss, ".")) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go index e3fb0b578586c..9b64ad5bba285 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -12,21 +12,21 @@ import ( "reflect" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // These functions exist to support exported APIs in generated protobufs. // While these are deprecated, they cannot be removed for compatibility reasons. // LegacyEnumName returns the name of enums used in legacy code. 
-func (Export) LegacyEnumName(ed pref.EnumDescriptor) string { +func (Export) LegacyEnumName(ed protoreflect.EnumDescriptor) string { return legacyEnumName(ed) } // LegacyMessageTypeOf returns the protoreflect.MessageType for m, // with name used as the message name if necessary. -func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType { +func (Export) LegacyMessageTypeOf(m protoiface.MessageV1, name protoreflect.FullName) protoreflect.MessageType { if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } @@ -36,9 +36,9 @@ func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.M // UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. // The input can either be a string representing the enum value by name, // or a number representing the enum number itself. -func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) { +func (Export) UnmarshalJSONEnum(ed protoreflect.EnumDescriptor, b []byte) (protoreflect.EnumNumber, error) { if b[0] == '"' { - var name pref.Name + var name protoreflect.Name if err := json.Unmarshal(b, &name); err != nil { return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) } @@ -48,7 +48,7 @@ func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumb } return ev.Number(), nil } else { - var num pref.EnumNumber + var num protoreflect.EnumNumber if err := json.Unmarshal(b, &num); err != nil { return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) } @@ -81,8 +81,8 @@ func (Export) CompressGZIP(in []byte) (out []byte) { blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3. blockSize = len(in) } - binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000) - binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff) + binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)) + binary.LittleEndian.PutUint16(blockHeader[3:5], ^uint16(blockSize)) out = append(out, blockHeader[:]...) out = append(out, in[:blockSize]...) in = in[blockSize:] diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 49e723161c018..87b30d0504c17 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -12,16 +12,16 @@ import ( ptag "google.golang.org/protobuf/internal/encoding/tag" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) func (xi *ExtensionInfo) initToLegacy() { xd := xi.desc - var parent piface.MessageV1 + var parent protoiface.MessageV1 messageName := xd.ContainingMessage().FullName() - if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil { + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(messageName); mt != nil { // Create a new parent message and unwrap it if possible. mv := mt.New().Interface() t := reflect.TypeOf(mv) @@ -31,7 +31,7 @@ func (xi *ExtensionInfo) initToLegacy() { // Check whether the message implements the legacy v1 Message interface. 
mz := reflect.Zero(t).Interface() - if mz, ok := mz.(piface.MessageV1); ok { + if mz, ok := mz.(protoiface.MessageV1); ok { parent = mz } } @@ -46,7 +46,7 @@ func (xi *ExtensionInfo) initToLegacy() { // Reconstruct the legacy enum full name. var enumName string - if xd.Kind() == pref.EnumKind { + if xd.Kind() == protoreflect.EnumKind { enumName = legacyEnumName(xd.Enum()) } @@ -77,16 +77,16 @@ func (xi *ExtensionInfo) initFromLegacy() { // field number is specified. In such a case, use a placeholder. if xi.ExtendedType == nil || xi.ExtensionType == nil { xd := placeholderExtension{ - name: pref.FullName(xi.Name), - number: pref.FieldNumber(xi.Field), + name: protoreflect.FullName(xi.Name), + number: protoreflect.FieldNumber(xi.Field), } xi.desc = extensionTypeDescriptor{xd, xi} return } // Resolve enum or message dependencies. - var ed pref.EnumDescriptor - var md pref.MessageDescriptor + var ed protoreflect.EnumDescriptor + var md protoreflect.MessageDescriptor t := reflect.TypeOf(xi.ExtensionType) isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 @@ -94,18 +94,18 @@ func (xi *ExtensionInfo) initFromLegacy() { t = t.Elem() } switch v := reflect.Zero(t).Interface().(type) { - case pref.Enum: + case protoreflect.Enum: ed = v.Descriptor() case enumV1: ed = LegacyLoadEnumDesc(t) - case pref.ProtoMessage: + case protoreflect.ProtoMessage: md = v.ProtoReflect().Descriptor() case messageV1: md = LegacyLoadMessageDesc(t) } // Derive basic field information from the struct tag. - var evs pref.EnumValueDescriptors + var evs protoreflect.EnumValueDescriptors if ed != nil { evs = ed.Values() } @@ -114,8 +114,8 @@ func (xi *ExtensionInfo) initFromLegacy() { // Construct a v2 ExtensionType. 
xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)} xd.L0.ParentFile = filedesc.SurrogateProto2 - xd.L0.FullName = pref.FullName(xi.Name) - xd.L1.Number = pref.FieldNumber(xi.Field) + xd.L0.FullName = protoreflect.FullName(xi.Name) + xd.L1.Number = protoreflect.FieldNumber(xi.Field) xd.L1.Cardinality = fd.L1.Cardinality xd.L1.Kind = fd.L1.Kind xd.L2.IsPacked = fd.L1.IsPacked @@ -138,39 +138,39 @@ func (xi *ExtensionInfo) initFromLegacy() { } type placeholderExtension struct { - name pref.FullName - number pref.FieldNumber + name protoreflect.FullName + number protoreflect.FieldNumber } -func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil } -func (x placeholderExtension) Parent() pref.Descriptor { return nil } -func (x placeholderExtension) Index() int { return 0 } -func (x placeholderExtension) Syntax() pref.Syntax { return 0 } -func (x placeholderExtension) Name() pref.Name { return x.name.Name() } -func (x placeholderExtension) FullName() pref.FullName { return x.name } -func (x placeholderExtension) IsPlaceholder() bool { return true } -func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field } -func (x placeholderExtension) Number() pref.FieldNumber { return x.number } -func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } -func (x placeholderExtension) Kind() pref.Kind { return 0 } -func (x placeholderExtension) HasJSONName() bool { return false } -func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } -func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } -func (x placeholderExtension) HasPresence() bool { return false } -func (x placeholderExtension) HasOptionalKeyword() bool { return false } -func (x placeholderExtension) IsExtension() bool { return true } -func (x placeholderExtension) IsWeak() bool { return false } -func (x placeholderExtension) IsPacked() bool { return false } -func (x placeholderExtension) IsList() bool { return false } -func (x placeholderExtension) IsMap() bool { return false } -func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil } -func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil } -func (x placeholderExtension) HasDefault() bool { return false } -func (x placeholderExtension) Default() pref.Value { return pref.Value{} } -func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil } -func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil } -func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil } -func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil } -func (x placeholderExtension) Message() pref.MessageDescriptor { return nil } -func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return } -func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } +func (x placeholderExtension) ParentFile() protoreflect.FileDescriptor { return nil } +func (x placeholderExtension) Parent() protoreflect.Descriptor { return nil } +func (x placeholderExtension) Index() int { return 0 } +func (x placeholderExtension) Syntax() protoreflect.Syntax { return 0 } +func (x placeholderExtension) Name() protoreflect.Name { return x.name.Name() } +func (x placeholderExtension) FullName() protoreflect.FullName { return x.name } +func (x placeholderExtension) IsPlaceholder() bool { return true } +func (x placeholderExtension) Options() protoreflect.ProtoMessage { return 
descopts.Field } +func (x placeholderExtension) Number() protoreflect.FieldNumber { return x.number } +func (x placeholderExtension) Cardinality() protoreflect.Cardinality { return 0 } +func (x placeholderExtension) Kind() protoreflect.Kind { return 0 } +func (x placeholderExtension) HasJSONName() bool { return false } +func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) HasPresence() bool { return false } +func (x placeholderExtension) HasOptionalKeyword() bool { return false } +func (x placeholderExtension) IsExtension() bool { return true } +func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsPacked() bool { return false } +func (x placeholderExtension) IsList() bool { return false } +func (x placeholderExtension) IsMap() bool { return false } +func (x placeholderExtension) MapKey() protoreflect.FieldDescriptor { return nil } +func (x placeholderExtension) MapValue() protoreflect.FieldDescriptor { return nil } +func (x placeholderExtension) HasDefault() bool { return false } +func (x placeholderExtension) Default() protoreflect.Value { return protoreflect.Value{} } +func (x placeholderExtension) DefaultEnumValue() protoreflect.EnumValueDescriptor { return nil } +func (x placeholderExtension) ContainingOneof() protoreflect.OneofDescriptor { return nil } +func (x placeholderExtension) ContainingMessage() protoreflect.MessageDescriptor { return nil } +func (x placeholderExtension) Enum() protoreflect.EnumDescriptor { return nil } +func (x placeholderExtension) Message() protoreflect.MessageDescriptor { return nil } +func (x placeholderExtension) ProtoType(protoreflect.FieldDescriptor) { return } +func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 029feeefd792b..61c483fac06ef 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -16,14 +16,12 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" - piface "google.golang.org/protobuf/runtime/protoiface" ) // legacyWrapMessage wraps v as a protoreflect.Message, // where v must be a *struct kind and not implement the v2 API already. -func legacyWrapMessage(v reflect.Value) pref.Message { +func legacyWrapMessage(v reflect.Value) protoreflect.Message { t := v.Type() if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessage{v: v} @@ -35,7 +33,7 @@ func legacyWrapMessage(v reflect.Value) pref.Message { // legacyLoadMessageType dynamically loads a protoreflect.Type for t, // where t must be not implement the v2 API already. // The provided name is used if it cannot be determined from the message. 
-func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { +func legacyLoadMessageType(t reflect.Type, name protoreflect.FullName) protoreflect.MessageType { if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessageType{t} } @@ -47,7 +45,7 @@ var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo // legacyLoadMessageInfo dynamically loads a *MessageInfo for t, // where t must be a *struct kind and not implement the v2 API already. // The provided name is used if it cannot be determined from the message. -func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { +func legacyLoadMessageInfo(t reflect.Type, name protoreflect.FullName) *MessageInfo { // Fast-path: check if a MessageInfo is cached for this concrete type. if mt, ok := legacyMessageTypeCache.Load(t); ok { return mt.(*MessageInfo) @@ -68,7 +66,7 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { // supports deterministic serialization or not, but this // preserves the v1 implementation's behavior of always // calling Marshal methods when present. - mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Flags |= protoiface.SupportMarshalDeterministic } if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { mi.methods.Unmarshal = legacyUnmarshal @@ -89,18 +87,18 @@ var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDesc // which should be a *struct kind and must not implement the v2 API already. // // This is exported for testing purposes. -func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { +func LegacyLoadMessageDesc(t reflect.Type) protoreflect.MessageDescriptor { return legacyLoadMessageDesc(t, "") } -func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func legacyLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { // Fast-path: check if a MessageDescriptor is cached for this concrete type. if mi, ok := legacyMessageDescCache.Load(t); ok { - return mi.(pref.MessageDescriptor) + return mi.(protoreflect.MessageDescriptor) } // Slow-path: initialize MessageDescriptor from the raw descriptor. mv := reflect.Zero(t).Interface() - if _, ok := mv.(pref.ProtoMessage); ok { + if _, ok := mv.(protoreflect.ProtoMessage); ok { panic(fmt.Sprintf("%v already implements proto.Message", t)) } mdV1, ok := mv.(messageV1) @@ -164,7 +162,7 @@ var ( // // This is a best-effort derivation of the message descriptor using the protobuf // tags on the struct fields. -func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func aberrantLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { aberrantMessageDescLock.Lock() defer aberrantMessageDescLock.Unlock() if aberrantMessageDescCache == nil { @@ -172,7 +170,7 @@ func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDes } return aberrantLoadMessageDescReentrant(t, name) } -func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { // Fast-path: check if an MessageDescriptor is cached for this concrete type. 
if md, ok := aberrantMessageDescCache[t]; ok { return md @@ -225,9 +223,9 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0] for i := 0; i < vs.Len(); i++ { v := vs.Index(i) - md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{ - pref.FieldNumber(v.FieldByName("Start").Int()), - pref.FieldNumber(v.FieldByName("End").Int() + 1), + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(v.FieldByName("Start").Int()), + protoreflect.FieldNumber(v.FieldByName("End").Int() + 1), }) md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil) } @@ -245,7 +243,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M n := len(md.L2.Oneofs.List) md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{}) od := &md.L2.Oneofs.List[n] - od.L0.FullName = md.FullName().Append(pref.Name(tag)) + od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) od.L0.ParentFile = md.L0.ParentFile od.L0.Parent = md od.L0.Index = n @@ -267,14 +265,14 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M return md } -func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName { +func aberrantDeriveMessageName(t reflect.Type, name protoreflect.FullName) protoreflect.FullName { if name.IsValid() { return name } func() { defer func() { recover() }() // swallow possible nil panics if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok { - name = pref.FullName(m.XXX_MessageName()) + name = protoreflect.FullName(m.XXX_MessageName()) } }() if name.IsValid() { @@ -305,7 +303,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Index = n if fd.L1.IsWeak || fd.L1.HasPacked { - fd.L1.Options = func() pref.ProtoMessage { + fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() if fd.L1.IsWeak { opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) @@ -318,17 +316,17 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, } // Populate Enum and Message. 
- if fd.Enum() == nil && fd.Kind() == pref.EnumKind { + if fd.Enum() == nil && fd.Kind() == protoreflect.EnumKind { switch v := reflect.Zero(t).Interface().(type) { - case pref.Enum: + case protoreflect.Enum: fd.L1.Enum = v.Descriptor() default: fd.L1.Enum = LegacyLoadEnumDesc(t) } } - if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) { + if fd.Message() == nil && (fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind) { switch v := reflect.Zero(t).Interface().(type) { - case pref.ProtoMessage: + case protoreflect.ProtoMessage: fd.L1.Message = v.ProtoReflect().Descriptor() case messageV1: fd.L1.Message = LegacyLoadMessageDesc(t) @@ -337,13 +335,13 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, n := len(md.L1.Messages.List) md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)}) md2 := &md.L1.Messages.List[n] - md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name())))) + md2.L0.FullName = md.FullName().Append(protoreflect.Name(strs.MapEntryName(string(fd.Name())))) md2.L0.ParentFile = md.L0.ParentFile md2.L0.Parent = md md2.L0.Index = n md2.L1.IsMapEntry = true - md2.L2.Options = func() pref.ProtoMessage { + md2.L2.Options = func() protoreflect.ProtoMessage { opts := descopts.Message.ProtoReflect().New() opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true)) return opts.Interface() @@ -364,8 +362,8 @@ type placeholderEnumValues struct { protoreflect.EnumValueDescriptors } -func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor { - return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n))) +func (placeholderEnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + return filedesc.PlaceholderEnumValue(protoreflect.FullName(fmt.Sprintf("UNKNOWN_%d", n))) } // legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder. @@ -383,7 +381,7 @@ type legacyMerger interface { Merge(protoiface.MessageV1) } -var aberrantProtoMethods = &piface.Methods{ +var aberrantProtoMethods = &protoiface.Methods{ Marshal: legacyMarshal, Unmarshal: legacyUnmarshal, Merge: legacyMerge, @@ -392,40 +390,40 @@ var aberrantProtoMethods = &piface.Methods{ // supports deterministic serialization or not, but this // preserves the v1 implementation's behavior of always // calling Marshal methods when present. - Flags: piface.SupportMarshalDeterministic, + Flags: protoiface.SupportMarshalDeterministic, } -func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) { +func legacyMarshal(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() marshaler, ok := v.(legacyMarshaler) if !ok { - return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) + return protoiface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) } out, err := marshaler.Marshal() if in.Buf != nil { out = append(in.Buf, out...) 
} - return piface.MarshalOutput{ + return protoiface.MarshalOutput{ Buf: out, }, err } -func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { +func legacyUnmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() unmarshaler, ok := v.(legacyUnmarshaler) if !ok { - return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) + return protoiface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) } - return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) + return protoiface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) } -func legacyMerge(in piface.MergeInput) piface.MergeOutput { +func legacyMerge(in protoiface.MergeInput) protoiface.MergeOutput { // Check whether this supports the legacy merger. dstv := in.Destination.(unwrapper).protoUnwrap() merger, ok := dstv.(legacyMerger) if ok { merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } // If legacy merger is unavailable, implement merge in terms of @@ -433,29 +431,29 @@ func legacyMerge(in piface.MergeInput) piface.MergeOutput { srcv := in.Source.(unwrapper).protoUnwrap() marshaler, ok := srcv.(legacyMarshaler) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } dstv = in.Destination.(unwrapper).protoUnwrap() unmarshaler, ok := dstv.(legacyUnmarshaler) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } if !in.Source.IsValid() { // Legacy Marshal methods may not function on nil messages. // Check for a typed nil source only after we confirm that // legacy Marshal/Unmarshal methods are present, for // consistency. - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } b, err := marshaler.Marshal() if err != nil { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } err = unmarshaler.Unmarshal(b) if err != nil { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } // aberrantMessageType implements MessageType for all types other than pointer-to-struct. 
@@ -463,19 +461,19 @@ type aberrantMessageType struct { t reflect.Type } -func (mt aberrantMessageType) New() pref.Message { +func (mt aberrantMessageType) New() protoreflect.Message { if mt.t.Kind() == reflect.Ptr { return aberrantMessage{reflect.New(mt.t.Elem())} } return aberrantMessage{reflect.Zero(mt.t)} } -func (mt aberrantMessageType) Zero() pref.Message { +func (mt aberrantMessageType) Zero() protoreflect.Message { return aberrantMessage{reflect.Zero(mt.t)} } func (mt aberrantMessageType) GoType() reflect.Type { return mt.t } -func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor { +func (mt aberrantMessageType) Descriptor() protoreflect.MessageDescriptor { return LegacyLoadMessageDesc(mt.t) } @@ -499,56 +497,56 @@ func (m aberrantMessage) Reset() { } } -func (m aberrantMessage) ProtoReflect() pref.Message { +func (m aberrantMessage) ProtoReflect() protoreflect.Message { return m } -func (m aberrantMessage) Descriptor() pref.MessageDescriptor { +func (m aberrantMessage) Descriptor() protoreflect.MessageDescriptor { return LegacyLoadMessageDesc(m.v.Type()) } -func (m aberrantMessage) Type() pref.MessageType { +func (m aberrantMessage) Type() protoreflect.MessageType { return aberrantMessageType{m.v.Type()} } -func (m aberrantMessage) New() pref.Message { +func (m aberrantMessage) New() protoreflect.Message { if m.v.Type().Kind() == reflect.Ptr { return aberrantMessage{reflect.New(m.v.Type().Elem())} } return aberrantMessage{reflect.Zero(m.v.Type())} } -func (m aberrantMessage) Interface() pref.ProtoMessage { +func (m aberrantMessage) Interface() protoreflect.ProtoMessage { return m } -func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +func (m aberrantMessage) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { return } -func (m aberrantMessage) Has(pref.FieldDescriptor) bool { +func (m aberrantMessage) Has(protoreflect.FieldDescriptor) bool { return false } -func (m aberrantMessage) Clear(pref.FieldDescriptor) { +func (m aberrantMessage) Clear(protoreflect.FieldDescriptor) { panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { if fd.Default().IsValid() { return fd.Default() } panic("invalid Message.Get on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { +func (m aberrantMessage) Set(protoreflect.FieldDescriptor, protoreflect.Value) { panic("invalid Message.Set on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) Mutable(protoreflect.FieldDescriptor) protoreflect.Value { panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) NewField(protoreflect.FieldDescriptor) protoreflect.Value { panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { +func (m aberrantMessage) WhichOneof(protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) GetUnknown() pref.RawFields { +func (m aberrantMessage) GetUnknown() protoreflect.RawFields { return nil } -func (m aberrantMessage) 
SetUnknown(pref.RawFields) { +func (m aberrantMessage) SetUnknown(protoreflect.RawFields) { // SetUnknown discards its input on messages which don't support unknown field storage. } func (m aberrantMessage) IsValid() bool { @@ -557,7 +555,7 @@ func (m aberrantMessage) IsValid() bool { } return false } -func (m aberrantMessage) ProtoMethods() *piface.Methods { +func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } func (m aberrantMessage) protoUnwrap() interface{} { diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go index c65bbc0446ea8..7e65f64f28e37 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -9,8 +9,8 @@ import ( "reflect" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) type mergeOptions struct{} @@ -20,17 +20,17 @@ func (o mergeOptions) Merge(dst, src proto.Message) { } // merge is protoreflect.Methods.Merge. -func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput { +func (mi *MessageInfo) merge(in protoiface.MergeInput) protoiface.MergeOutput { dp, ok := mi.getPointer(in.Destination) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } sp, ok := mi.getPointer(in.Source) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } mi.mergePointer(dp, sp, mergeOptions{}) - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { @@ -64,7 +64,7 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { continue } dx := (*dext)[num] - var dv pref.Value + var dv protoreflect.Value if dx.Type() == sx.Type() { dv = dx.Value() } @@ -85,15 +85,15 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { } } -func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeScalarValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { return src } -func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value { - return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) +func mergeBytesValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + return protoreflect.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) } -func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { @@ -102,29 +102,29 @@ func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { return dst } -func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeBytesListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { sb := srcl.Get(i).Bytes() db := append(emptyBuf[:], sb...) 
- dstl.Append(pref.ValueOfBytes(db)) + dstl.Append(protoreflect.ValueOfBytes(db)) } return dst } -func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeMessageListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { sm := srcl.Get(i).Message() dm := proto.Clone(sm.Interface()).ProtoReflect() - dstl.Append(pref.ValueOfMessage(dm)) + dstl.Append(protoreflect.ValueOfMessage(dm)) } return dst } -func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeMessageValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { opts.Merge(dst.Message().Interface(), src.Message().Interface()) return dst } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index a104e28e858fa..4f5fb67a0ddb6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -14,8 +14,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -29,7 +28,7 @@ type MessageInfo struct { GoReflectType reflect.Type // pointer to struct // Desc is the underlying message descriptor type and must be populated. - Desc pref.MessageDescriptor + Desc protoreflect.MessageDescriptor // Exporter must be provided in a purego environment in order to provide // access to unexported fields. @@ -54,7 +53,7 @@ type exporter func(v interface{}, i int) interface{} // is generated by our implementation of protoc-gen-go (for v2 and on). // If it is unable to obtain a MessageInfo, it returns nil. func getMessageInfo(mt reflect.Type) *MessageInfo { - m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage) + m, ok := reflect.Zero(mt).Interface().(protoreflect.ProtoMessage) if !ok { return nil } @@ -97,7 +96,7 @@ func (mi *MessageInfo) initOnce() { // getPointer returns the pointer for a message, which should be of // the type of the MessageInfo. If the message is of a different type, // it returns ok==false. 
-func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { +func (mi *MessageInfo) getPointer(m protoreflect.Message) (p pointer, ok bool) { switch m := m.(type) { case *messageState: return m.pointer(), m.messageInfo() == mi @@ -134,10 +133,10 @@ type structInfo struct { extensionOffset offset extensionType reflect.Type - fieldsByNumber map[pref.FieldNumber]reflect.StructField - oneofsByName map[pref.Name]reflect.StructField - oneofWrappersByType map[reflect.Type]pref.FieldNumber - oneofWrappersByNumber map[pref.FieldNumber]reflect.Type + fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField + oneofsByName map[protoreflect.Name]reflect.StructField + oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber + oneofWrappersByNumber map[protoreflect.FieldNumber]reflect.Type } func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { @@ -147,10 +146,10 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { unknownOffset: invalidOffset, extensionOffset: invalidOffset, - fieldsByNumber: map[pref.FieldNumber]reflect.StructField{}, - oneofsByName: map[pref.Name]reflect.StructField{}, - oneofWrappersByType: map[reflect.Type]pref.FieldNumber{}, - oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{}, + fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{}, + oneofsByName: map[protoreflect.Name]reflect.StructField{}, + oneofWrappersByType: map[reflect.Type]protoreflect.FieldNumber{}, + oneofWrappersByNumber: map[protoreflect.FieldNumber]reflect.Type{}, } fieldLoop: @@ -180,12 +179,12 @@ fieldLoop: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { n, _ := strconv.ParseUint(s, 10, 64) - si.fieldsByNumber[pref.FieldNumber(n)] = f + si.fieldsByNumber[protoreflect.FieldNumber(n)] = f continue fieldLoop } } if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 { - si.oneofsByName[pref.Name(s)] = f + si.oneofsByName[protoreflect.Name(s)] = f continue fieldLoop } } @@ -208,8 +207,8 @@ fieldLoop: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { n, _ := strconv.ParseUint(s, 10, 64) - si.oneofWrappersByType[tf] = pref.FieldNumber(n) - si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf + si.oneofWrappersByType[tf] = protoreflect.FieldNumber(n) + si.oneofWrappersByNumber[protoreflect.FieldNumber(n)] = tf break } } @@ -219,7 +218,11 @@ fieldLoop: } func (mi *MessageInfo) New() protoreflect.Message { - return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface()) + m := reflect.New(mi.GoReflectType.Elem()).Interface() + if r, ok := m.(protoreflect.ProtoMessage); ok { + return r.ProtoReflect() + } + return mi.MessageOf(m) } func (mi *MessageInfo) Zero() protoreflect.Message { return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) @@ -237,7 +240,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { fd := mi.Desc.Fields().Get(i) switch { case fd.IsWeak(): - mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) + mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) return mt case fd.IsMap(): return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 9488b72613136..d9ea010bef9af 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ 
b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -10,17 +10,17 @@ import ( "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type reflectMessageInfo struct { - fields map[pref.FieldNumber]*fieldInfo - oneofs map[pref.Name]*oneofInfo + fields map[protoreflect.FieldNumber]*fieldInfo + oneofs map[protoreflect.Name]*oneofInfo // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[pref.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]interface{} // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -30,8 +30,8 @@ type reflectMessageInfo struct { // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. rangeInfos []interface{} // either *fieldInfo or *oneofInfo - getUnknown func(pointer) pref.RawFields - setUnknown func(pointer, pref.RawFields) + getUnknown func(pointer) protoreflect.RawFields + setUnknown func(pointer, protoreflect.RawFields) extensionMap func(pointer) *extensionMap nilMessage atomicNilMessage @@ -52,7 +52,7 @@ func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { // This code assumes that the struct is well-formed and panics if there are // any discrepancies. func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { - mi.fields = map[pref.FieldNumber]*fieldInfo{} + mi.fields = map[protoreflect.FieldNumber]*fieldInfo{} md := mi.Desc fds := md.Fields() for i := 0; i < fds.Len(); i++ { @@ -82,7 +82,7 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { mi.fields[fd.Number()] = &fi } - mi.oneofs = map[pref.Name]*oneofInfo{} + mi.oneofs = map[protoreflect.Name]*oneofInfo{} for i := 0; i < md.Oneofs().Len(); i++ { od := md.Oneofs().Get(i) mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) @@ -117,13 +117,13 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { switch { case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: // Handle as []byte. - mi.getUnknown = func(p pointer) pref.RawFields { + mi.getUnknown = func(p pointer) protoreflect.RawFields { if p.IsNil() { return nil } return *p.Apply(mi.unknownOffset).Bytes() } - mi.setUnknown = func(p pointer, b pref.RawFields) { + mi.setUnknown = func(p pointer, b protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -131,7 +131,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { } case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: // Handle as *[]byte. 
- mi.getUnknown = func(p pointer) pref.RawFields { + mi.getUnknown = func(p pointer) protoreflect.RawFields { if p.IsNil() { return nil } @@ -141,7 +141,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { } return **bp } - mi.setUnknown = func(p pointer, b pref.RawFields) { + mi.setUnknown = func(p pointer, b protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -152,10 +152,10 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { **bp = b } default: - mi.getUnknown = func(pointer) pref.RawFields { + mi.getUnknown = func(pointer) protoreflect.RawFields { return nil } - mi.setUnknown = func(p pointer, _ pref.RawFields) { + mi.setUnknown = func(p pointer, _ protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[pref.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -233,7 +233,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { type extensionMap map[int32]ExtensionField -func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { if m != nil { for _, x := range *m { xd := x.Type().TypeDescriptor() @@ -247,7 +247,7 @@ func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { } } } -func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { +func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { if m == nil { return false } @@ -266,10 +266,10 @@ func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { } return true } -func (m *extensionMap) Clear(xt pref.ExtensionType) { +func (m *extensionMap) Clear(xt protoreflect.ExtensionType) { delete(*m, int32(xt.TypeDescriptor().Number())) } -func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { +func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value { xd := xt.TypeDescriptor() if m != nil { if x, ok := (*m)[int32(xd.Number())]; ok { @@ -278,7 +278,7 @@ func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { } return xt.Zero() } -func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { +func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) { xd := xt.TypeDescriptor() isValid := true switch { @@ -302,9 +302,9 @@ func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { x.Set(xt, v) (*m)[int32(xd.Number())] = x } -func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { +func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value { xd := xt.TypeDescriptor() - if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() { + if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { panic("invalid Mutable on field with non-composite type") } if x, ok := (*m)[int32(xd.Number())]; ok { @@ -320,7 +320,6 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { // in an allocation-free way without needing to have a shadow Go type generated // for every message type. This technique only works using unsafe. 
// -// // Example generated code: // // type M struct { @@ -351,12 +350,11 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { // It has access to the message info as its first field, and a pointer to the // MessageState is identical to a pointer to the concrete message value. // -// // Requirements: -// • The type M must implement protoreflect.ProtoMessage. -// • The address of m must not be nil. -// • The address of m and the address of m.state must be equal, -// even though they are different Go types. +// - The type M must implement protoreflect.ProtoMessage. +// - The address of m must not be nil. +// - The address of m and the address of m.state must be equal, +// even though they are different Go types. type MessageState struct { pragma.NoUnkeyedLiterals pragma.DoNotCompare @@ -368,8 +366,8 @@ type MessageState struct { type messageState MessageState var ( - _ pref.Message = (*messageState)(nil) - _ unwrapper = (*messageState)(nil) + _ protoreflect.Message = (*messageState)(nil) + _ unwrapper = (*messageState)(nil) ) // messageDataType is a tuple of a pointer to the message data and @@ -387,16 +385,16 @@ type ( ) var ( - _ pref.Message = (*messageReflectWrapper)(nil) - _ unwrapper = (*messageReflectWrapper)(nil) - _ pref.ProtoMessage = (*messageIfaceWrapper)(nil) - _ unwrapper = (*messageIfaceWrapper)(nil) + _ protoreflect.Message = (*messageReflectWrapper)(nil) + _ unwrapper = (*messageReflectWrapper)(nil) + _ protoreflect.ProtoMessage = (*messageIfaceWrapper)(nil) + _ unwrapper = (*messageIfaceWrapper)(nil) ) // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. -func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { +func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -421,7 +419,7 @@ func (m *messageIfaceWrapper) Reset() { rv.Elem().Set(reflect.Zero(rv.Type().Elem())) } } -func (m *messageIfaceWrapper) ProtoReflect() pref.Message { +func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } func (m *messageIfaceWrapper) protoUnwrap() interface{} { @@ -430,7 +428,7 @@ func (m *messageIfaceWrapper) protoUnwrap() interface{} { // checkField verifies that the provided field descriptor is valid. // Exactly one of the returned values is populated. 
-func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) { +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { var fi *fieldInfo if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { fi = mi.denseFields[n] @@ -455,7 +453,7 @@ func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.Ext if !mi.Desc.ExtensionRanges().Has(fd.Number()) { panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName())) } - xtd, ok := fd.(pref.ExtensionTypeDescriptor) + xtd, ok := fd.(protoreflect.ExtensionTypeDescriptor) if !ok { panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 343cf872197f7..5e736c60efc73 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -11,24 +11,24 @@ import ( "sync" "google.golang.org/protobuf/internal/flags" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) type fieldInfo struct { - fieldDesc pref.FieldDescriptor + fieldDesc protoreflect.FieldDescriptor // These fields are used for protobuf reflection support. has func(pointer) bool clear func(pointer) - get func(pointer) pref.Value - set func(pointer, pref.Value) - mutable func(pointer) pref.Value - newMessage func() pref.Message - newField func() pref.Value + get func(pointer) protoreflect.Value + set func(pointer, protoreflect.Value) + mutable func(pointer) protoreflect.Value + newMessage func() protoreflect.Message + newField func() protoreflect.Value } -func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { +func fieldInfoForMissing(fd protoreflect.FieldDescriptor) fieldInfo { // This never occurs for generated message types. // It implies that a hand-crafted type has missing Go fields // for specific protobuf message fields. 
@@ -40,19 +40,19 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { clear: func(p pointer) { panic("missing Go struct field for " + string(fd.FullName())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { return fd.Default() }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { panic("missing Go struct field for " + string(fd.FullName())) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { panic("missing Go struct field for " + string(fd.FullName())) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { panic("missing Go struct field for " + string(fd.FullName())) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { if v := fd.Default(); v.IsValid() { return v } @@ -61,7 +61,7 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { } } -func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { +func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Interface { panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft)) @@ -102,7 +102,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export } rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -113,7 +113,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv = rv.Elem().Elem().Field(0) return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { rv.Set(reflect.New(ot)) @@ -121,7 +121,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv = rv.Elem().Elem().Field(0) rv.Set(conv.GoValueOf(v)) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { if !isMessage { panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName())) } @@ -131,20 +131,20 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export } rv = rv.Elem().Elem().Field(0) if rv.Kind() == reflect.Ptr && rv.IsNil() { - rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) + rv.Set(conv.GoValueOf(protoreflect.ValueOfMessage(conv.New().Message()))) } return conv.PBValueOf(rv) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { return conv.New().Message() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Map { panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft)) @@ -166,7 +166,7 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -176,7 +176,7 @@ func 
fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() pv := conv.GoValueOf(v) if pv.IsNil() { @@ -184,20 +184,20 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter } rv.Set(pv) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if v.IsNil() { v.Set(reflect.MakeMap(fs.Type)) } return conv.PBValueOf(v) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Slice { panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft)) @@ -219,7 +219,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -229,7 +229,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() pv := conv.GoValueOf(v) if pv.IsNil() { @@ -237,11 +237,11 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte } rv.Set(pv.Elem()) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { v := p.Apply(fieldOffset).AsValueOf(fs.Type) return conv.PBValueOf(v) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } @@ -252,7 +252,7 @@ var ( emptyBytes = reflect.ValueOf([]byte{}) ) -func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type nullable := fd.HasPresence() isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 @@ -300,7 +300,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -315,7 +315,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable && rv.Kind() == reflect.Ptr { if rv.IsNil() { @@ -332,23 +332,23 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor } } }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo { +func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { if !flags.ProtoLegacy { panic("no support for proto1 weak fields") } var once sync.Once - 
var messageType pref.MessageType + var messageType protoreflect.MessageType lazyInit := func() { once.Do(func() { messageName := fd.Message().FullName() - messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) if messageType == nil { panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) } @@ -368,18 +368,18 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn clear: func(p pointer) { p.Apply(weakOffset).WeakFields().clear(num) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { lazyInit() if p.IsNil() { - return pref.ValueOfMessage(messageType.Zero()) + return protoreflect.ValueOfMessage(messageType.Zero()) } m, ok := p.Apply(weakOffset).WeakFields().get(num) if !ok { - return pref.ValueOfMessage(messageType.Zero()) + return protoreflect.ValueOfMessage(messageType.Zero()) } - return pref.ValueOfMessage(m.ProtoReflect()) + return protoreflect.ValueOfMessage(m.ProtoReflect()) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { lazyInit() m := v.Message() if m.Descriptor() != messageType.Descriptor() { @@ -390,7 +390,7 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn } p.Apply(weakOffset).WeakFields().set(num, m.Interface()) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { lazyInit() fs := p.Apply(weakOffset).WeakFields() m, ok := fs.get(num) @@ -398,20 +398,20 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn m = messageType.New().Interface() fs.set(num, m) } - return pref.ValueOfMessage(m.ProtoReflect()) + return protoreflect.ValueOfMessage(m.ProtoReflect()) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { lazyInit() return messageType.New() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { lazyInit() - return pref.ValueOfMessage(messageType.New()) + return protoreflect.ValueOfMessage(messageType.New()) }, } } -func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) @@ -433,47 +433,47 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(conv.GoValueOf(v)) if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) } }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(conv.New())) } return conv.PBValueOf(rv) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { return conv.New().Message() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } type oneofInfo struct { - 
oneofDesc pref.OneofDescriptor - which func(pointer) pref.FieldNumber + oneofDesc protoreflect.OneofDescriptor + which func(pointer) protoreflect.FieldNumber } -func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo { +func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { oi := &oneofInfo{oneofDesc: od} if od.IsSynthetic() { fs := si.fieldsByNumber[od.Fields().Get(0).Number()] fieldOffset := offsetOf(fs, x) - oi.which = func(p pointer) pref.FieldNumber { + oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 } @@ -486,7 +486,7 @@ func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInf } else { fs := si.oneofsByName[od.Name()] fieldOffset := offsetOf(fs, x) - oi.which = func(p pointer) pref.FieldNumber { + oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 } diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index 08cfb6054b431..a24e6bbd7a5f9 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -16,9 +16,9 @@ import ( "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) // ValidationStatus is the result of validating the wire-format encoding of a message. @@ -56,20 +56,20 @@ func (v ValidationStatus) String() string { // of the message type. // // This function is exposed for testing. 
-func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) { +func Validate(mt protoreflect.MessageType, in protoiface.UnmarshalInput) (out protoiface.UnmarshalOutput, _ ValidationStatus) { mi, ok := mt.(*MessageInfo) if !ok { return out, ValidationUnknown } if in.Resolver == nil { - in.Resolver = preg.GlobalTypes + in.Resolver = protoregistry.GlobalTypes } o, st := mi.validate(in.Buf, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, }) if o.initialized { - out.Flags |= piface.UnmarshalInitialized + out.Flags |= protoiface.UnmarshalInitialized } return out, st } @@ -106,22 +106,22 @@ const ( validationTypeMessageSetItem ) -func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo { +func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { var vi validationInfo switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { vi.mi = getMessageInfo(ot.Field(0).Type) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { vi.mi = getMessageInfo(ot.Field(0).Type) } - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String } @@ -129,7 +129,7 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip default: vi = newValidationInfo(fd, ft) } - if fd.Cardinality() == pref.Required { + if fd.Cardinality() == protoreflect.Required { // Avoid overflow. 
The required field check is done with a 64-bit mask, with // any message containing more than 64 required fields always reported as // potentially uninitialized, so it is not important to get a precise count @@ -142,22 +142,22 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip return vi } -func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo { +func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { var vi validationInfo switch { case fd.IsList(): switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } - case pref.StringKind: + case protoreflect.StringKind: vi.typ = validationTypeBytes if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String @@ -175,33 +175,33 @@ func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo case fd.IsMap(): vi.typ = validationTypeMap switch fd.MapKey().Kind() { - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.keyType = validationTypeUTF8String } } switch fd.MapValue().Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.valType = validationTypeMessage if ft.Kind() == reflect.Map { vi.mi = getMessageInfo(ft.Elem()) } - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.valType = validationTypeUTF8String } } default: switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if !fd.IsWeak() { vi.mi = getMessageInfo(ft) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup vi.mi = getMessageInfo(ft) - case pref.StringKind: + case protoreflect.StringKind: vi.typ = validationTypeBytes if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String @@ -314,11 +314,11 @@ State: break } messageName := fd.Message().FullName() - messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) switch err { case nil: vi.mi, _ = messageType.(*MessageInfo) - case preg.NotFound: + case protoregistry.NotFound: vi.typ = validationTypeBytes default: return out, ValidationUnknown @@ -335,7 +335,7 @@ State: // unmarshaling to begin failing. Supporting this requires some way to // determine if the resolver is frozen. 
xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num) - if err != nil && err != preg.NotFound { + if err != nil && err != protoregistry.NotFound { return out, ValidationUnknown } if err == nil { @@ -513,7 +513,7 @@ State: } xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid) switch { - case err == preg.NotFound: + case err == protoregistry.NotFound: b = b[n:] case err != nil: return out, ValidationUnknown diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go index 009cbefd1ed2c..eb79a7ba94c09 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go @@ -7,7 +7,7 @@ package impl import ( "fmt" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -17,32 +17,32 @@ import ( // defined directly on it. type weakFields WeakFields -func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) { +func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { m, ok := w[int32(num)] return m, ok } -func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) { +func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { if *w == nil { *w = make(weakFields) } (*w)[int32(num)] = m } -func (w *weakFields) clear(num pref.FieldNumber) { +func (w *weakFields) clear(num protoreflect.FieldNumber) { delete(*w, int32(num)) } -func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool { +func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { _, ok := w[int32(num)] return ok } -func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) { +func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { delete(*w, int32(num)) } -func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage { +func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { if m, ok := w[int32(num)]; ok { return m } @@ -53,7 +53,7 @@ func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pr return mt.Zero().Interface() } -func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) { +func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { if m != nil { mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) if mt == nil { diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go index 2a24953f6a47a..33745ed062541 100644 --- a/vendor/google.golang.org/protobuf/internal/order/order.go +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -5,12 +5,12 @@ package order import ( - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // FieldOrder specifies the ordering to visit message fields. // It is a function that reports whether x is ordered before y. -type FieldOrder func(x, y pref.FieldDescriptor) bool +type FieldOrder func(x, y protoreflect.FieldDescriptor) bool var ( // AnyFieldOrder specifies no specific field ordering. 
@@ -18,9 +18,9 @@ var ( // LegacyFieldOrder sorts fields in the same ordering as emitted by // wire serialization in the github.com/golang/protobuf implementation. - LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + LegacyFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { ox, oy := x.ContainingOneof(), y.ContainingOneof() - inOneof := func(od pref.OneofDescriptor) bool { + inOneof := func(od protoreflect.OneofDescriptor) bool { return od != nil && !od.IsSynthetic() } @@ -41,14 +41,14 @@ var ( } // NumberFieldOrder sorts fields by their field number. - NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + NumberFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { return x.Number() < y.Number() } // IndexNameFieldOrder sorts non-extension fields before extension fields. // Non-extensions are sorted according to their declaration index. // Extensions are sorted according to their full name. - IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + IndexNameFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { // Non-extension fields sort before extension fields. if x.IsExtension() != y.IsExtension() { return !x.IsExtension() && y.IsExtension() @@ -64,7 +64,7 @@ var ( // KeyOrder specifies the ordering to visit map entries. // It is a function that reports whether x is ordered before y. -type KeyOrder func(x, y pref.MapKey) bool +type KeyOrder func(x, y protoreflect.MapKey) bool var ( // AnyKeyOrder specifies no specific key ordering. @@ -72,7 +72,7 @@ var ( // GenericKeyOrder sorts false before true, numeric keys in ascending order, // and strings in lexicographical ordering according to UTF-8 codepoints. - GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool { + GenericKeyOrder KeyOrder = func(x, y protoreflect.MapKey) bool { switch x.Interface().(type) { case bool: return !x.Bool() && y.Bool() diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index c8090e0c547f6..1665a68e5b7c4 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -9,12 +9,12 @@ import ( "sort" "sync" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type messageField struct { - fd pref.FieldDescriptor - v pref.Value + fd protoreflect.FieldDescriptor + v protoreflect.Value } var messageFieldPool = sync.Pool{ @@ -25,8 +25,8 @@ type ( // FieldRnger is an interface for visiting all fields in a message. // The protoreflect.Message type implements this interface. FieldRanger interface{ Range(VisitField) } - // VisitField is called everytime a message field is visited. - VisitField = func(pref.FieldDescriptor, pref.Value) bool + // VisitField is called every time a message field is visited. + VisitField = func(protoreflect.FieldDescriptor, protoreflect.Value) bool ) // RangeFields iterates over the fields of fs according to the specified order. @@ -47,7 +47,7 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { }() // Collect all fields in the message and sort them. 
- fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + fs.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { fields = append(fields, messageField{fd, v}) return true }) @@ -64,8 +64,8 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { } type mapEntry struct { - k pref.MapKey - v pref.Value + k protoreflect.MapKey + v protoreflect.Value } var mapEntryPool = sync.Pool{ @@ -76,8 +76,8 @@ type ( // EntryRanger is an interface for visiting all fields in a message. // The protoreflect.Map type implements this interface. EntryRanger interface{ Range(VisitEntry) } - // VisitEntry is called everytime a map entry is visited. - VisitEntry = func(pref.MapKey, pref.Value) bool + // VisitEntry is called every time a map entry is visited. + VisitEntry = func(protoreflect.MapKey, protoreflect.Value) bool ) // RangeEntries iterates over the entries of es according to the specified order. @@ -98,7 +98,7 @@ func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { }() // Collect all entries in the map and sort them. - es.Range(func(k pref.MapKey, v pref.Value) bool { + es.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { entries = append(entries, mapEntry{k, v}) return true }) diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index 56a8a4ed3c9d1..fea589c457e94 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -10,7 +10,7 @@ package strs import ( "unsafe" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type ( @@ -59,7 +59,7 @@ type Builder struct { // AppendFullName is equivalent to protoreflect.FullName.Append, // but optimized for large batches where each name has a shared lifetime. -func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { n := len(prefix) + len(".") + len(name) if len(prefix) == 0 { n -= len(".") @@ -68,7 +68,7 @@ func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.Ful sb.buf = append(sb.buf, prefix...) sb.buf = append(sb.buf, '.') sb.buf = append(sb.buf, name...) - return pref.FullName(sb.last(n)) + return protoreflect.FullName(sb.last(n)) } // MakeString is equivalent to string(b), but optimized for large batches diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 3d40d5249e965..b480c5010f1d0 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -12,54 +12,54 @@ import ( // These constants determine the current version of this module. // -// // For our release process, we enforce the following rules: -// * Tagged releases use a tag that is identical to String. -// * Tagged releases never reference a commit where the String -// contains "devel". -// * The set of all commits in this repository where String -// does not contain "devel" must have a unique String. -// +// - Tagged releases use a tag that is identical to String. +// - Tagged releases never reference a commit where the String +// contains "devel". 
+// - The set of all commits in this repository where String +// does not contain "devel" must have a unique String. // // Steps for tagging a new release: -// 1. Create a new CL. // -// 2. Update Minor, Patch, and/or PreRelease as necessary. -// PreRelease must not contain the string "devel". +// 1. Create a new CL. // -// 3. Since the last released minor version, have there been any changes to -// generator that relies on new functionality in the runtime? -// If yes, then increment RequiredGenerated. +// 2. Update Minor, Patch, and/or PreRelease as necessary. +// PreRelease must not contain the string "devel". // -// 4. Since the last released minor version, have there been any changes to -// the runtime that removes support for old .pb.go source code? -// If yes, then increment SupportMinimum. +// 3. Since the last released minor version, have there been any changes to +// generator that relies on new functionality in the runtime? +// If yes, then increment RequiredGenerated. // -// 5. Send out the CL for review and submit it. -// Note that the next CL in step 8 must be submitted after this CL -// without any other CLs in-between. +// 4. Since the last released minor version, have there been any changes to +// the runtime that removes support for old .pb.go source code? +// If yes, then increment SupportMinimum. // -// 6. Tag a new version, where the tag is is the current String. +// 5. Send out the CL for review and submit it. +// Note that the next CL in step 8 must be submitted after this CL +// without any other CLs in-between. // -// 7. Write release notes for all notable changes -// between this release and the last release. +// 6. Tag a new version, where the tag is is the current String. // -// 8. Create a new CL. +// 7. Write release notes for all notable changes +// between this release and the last release. // -// 9. Update PreRelease to include the string "devel". -// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// 8. Create a new CL. // -// 10. Send out the CL for review and submit it. +// 9. Update PreRelease to include the string "devel". +// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// +// 10. Send out the CL for review and submit it. const ( Major = 1 Minor = 28 - Patch = 0 + Patch = 1 PreRelease = "" ) // String formats the version string for this module in semver format. // // Examples: +// // v1.20.1 // v1.21.0-rc.1 func String() string { diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 11bf7173be922..48d47946bb1ad 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -19,7 +19,8 @@ import ( // UnmarshalOptions configures the unmarshaler. 
// // Example usage: -// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) +// +// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) type UnmarshalOptions struct { pragma.NoUnkeyedLiterals diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index c52d8c4ab79ff..08d2a46f53525 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -6,18 +6,17 @@ // // For documentation on protocol buffers in general, see: // -// https://developers.google.com/protocol-buffers +// https://developers.google.com/protocol-buffers // // For a tutorial on using protocol buffers with Go, see: // -// https://developers.google.com/protocol-buffers/docs/gotutorial +// https://developers.google.com/protocol-buffers/docs/gotutorial // // For a guide to generated Go protocol buffer code, see: // -// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// https://developers.google.com/protocol-buffers/docs/reference/go-generated // -// -// Binary serialization +// # Binary serialization // // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. @@ -30,8 +29,7 @@ // • Unmarshal converts a message from the wire format. // The UnmarshalOptions type provides more control over wire unmarshaling. // -// -// Basic message operations +// # Basic message operations // // • Clone makes a deep copy of a message. // @@ -45,8 +43,7 @@ // // • CheckInitialized reports whether all required fields in a message are set. // -// -// Optional scalar constructors +// # Optional scalar constructors // // The API for some generated messages represents optional scalar fields // as pointers to a value. For example, an optional string field has the @@ -61,16 +58,14 @@ // // Optional scalar fields are only supported in proto2. // -// -// Extension accessors +// # Extension accessors // // • HasExtension, GetExtension, SetExtension, and ClearExtension // access extension field values in a protocol buffer message. // // Extension fields are only supported in proto2. // -// -// Related packages +// # Related packages // // • Package "google.golang.org/protobuf/encoding/protojson" converts messages to // and from JSON. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index d18239c23723a..bf7f816d0e862 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -16,7 +16,8 @@ import ( // MarshalOptions configures the marshaler. // // Example usage: -// b, err := MarshalOptions{Deterministic: true}.Marshal(m) +// +// b, err := MarshalOptions{Deterministic: true}.Marshal(m) type MarshalOptions struct { pragma.NoUnkeyedLiterals @@ -101,7 +102,9 @@ func (o MarshalOptions) Marshal(m Message) ([]byte, error) { // otherwise it returns a non-nil empty buffer. // // This is to assist the edge-case where user-code does the following: +// // m1.OptionalBytes, _ = proto.Marshal(m2) +// // where they expect the proto2 "optional_bytes" field to be populated // if any only if m2 is a valid message. 
func emptyBytesForMessage(m Message) []byte { diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 4dba2b9699729..67948dd1df8cc 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -10,7 +10,7 @@ import ( "reflect" "google.golang.org/protobuf/encoding/protowire" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // Equal reports whether two messages are equal. @@ -33,6 +33,10 @@ func Equal(x, y Message) bool { if x == nil || y == nil { return x == nil && y == nil } + if reflect.TypeOf(x).Kind() == reflect.Ptr && x == y { + // Avoid an expensive comparison if both inputs are identical pointers. + return true + } mx := x.ProtoReflect() my := y.ProtoReflect() if mx.IsValid() != my.IsValid() { @@ -42,14 +46,14 @@ func Equal(x, y Message) bool { } // equalMessage compares two messages. -func equalMessage(mx, my pref.Message) bool { +func equalMessage(mx, my protoreflect.Message) bool { if mx.Descriptor() != my.Descriptor() { return false } nx := 0 equal := true - mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { nx++ vy := my.Get(fd) equal = my.Has(fd) && equalField(fd, vx, vy) @@ -59,7 +63,7 @@ func equalMessage(mx, my pref.Message) bool { return false } ny := 0 - my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + my.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { ny++ return true }) @@ -71,7 +75,7 @@ func equalMessage(mx, my pref.Message) bool { } // equalField compares two fields. -func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { +func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { switch { case fd.IsList(): return equalList(fd, x.List(), y.List()) @@ -83,12 +87,12 @@ func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { } // equalMap compares two maps. -func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { +func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool { if x.Len() != y.Len() { return false } equal := true - x.Range(func(k pref.MapKey, vx pref.Value) bool { + x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { vy := y.Get(k) equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) return equal @@ -97,7 +101,7 @@ func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { } // equalList compares two lists. -func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { +func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool { if x.Len() != y.Len() { return false } @@ -110,31 +114,31 @@ func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { } // equalValue compares two singular values. 
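Beyond the alias renames, the equal.go hunk above carries a behavioural change: proto.Equal now returns true immediately when both arguments are the identical pointer, skipping the reflective field-by-field walk. A small usage sketch, not part of the diff (wrapperspb is used here purely for illustration):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.String("hello")

	// Identical pointers: the new fast path short-circuits before any reflection.
	fmt.Println(proto.Equal(m, m)) // true

	// Distinct but equivalent messages are still compared field by field.
	fmt.Println(proto.Equal(m, wrapperspb.String("hello"))) // true

	// Different contents remain unequal.
	fmt.Println(proto.Equal(m, wrapperspb.String("world"))) // false
}
```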
-func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { +func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return x.Bool() == y.Bool() - case pref.EnumKind: + case protoreflect.EnumKind: return x.Enum() == y.Enum() - case pref.Int32Kind, pref.Sint32Kind, - pref.Int64Kind, pref.Sint64Kind, - pref.Sfixed32Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, + protoreflect.Int64Kind, protoreflect.Sint64Kind, + protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: return x.Int() == y.Int() - case pref.Uint32Kind, pref.Uint64Kind, - pref.Fixed32Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, + protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: return x.Uint() == y.Uint() - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: fx := x.Float() fy := y.Float() if math.IsNaN(fx) || math.IsNaN(fy) { return math.IsNaN(fx) && math.IsNaN(fy) } return fx == fy - case pref.StringKind: + case protoreflect.StringKind: return x.String() == y.String() - case pref.BytesKind: + case protoreflect.BytesKind: return bytes.Equal(x.Bytes(), y.Bytes()) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return equalMessage(x.Message(), y.Message()) default: return x.Interface() == y.Interface() @@ -143,7 +147,7 @@ func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { // equalUnknown compares unknown fields by direct comparison on the raw bytes // of each individual field number. -func equalUnknown(x, y pref.RawFields) bool { +func equalUnknown(x, y protoreflect.RawFields) bool { if len(x) != len(y) { return false } @@ -151,8 +155,8 @@ func equalUnknown(x, y pref.RawFields) bool { return true } - mx := make(map[pref.FieldNumber]pref.RawFields) - my := make(map[pref.FieldNumber]pref.RawFields) + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) for len(x) > 0 { fnum, _, n := protowire.ConsumeField(x) mx[fnum] = append(mx[fnum], x[:n]...) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index cebb36cdade61..27d7e35012d3a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -155,9 +155,9 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, // // Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", // then the following full names are searched: -// * fizz.buzz.Foo.Bar -// * fizz.Foo.Bar -// * Foo.Bar +// - fizz.buzz.Foo.Bar +// - fizz.Foo.Bar +// - Foo.Bar func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { if !ref.IsValid() { return nil, errors.New("invalid name reference: %q", ref) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index dd85915bd4bfb..55aa14922b015 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -8,8 +8,7 @@ // defined in proto source files and value interfaces which provide the // ability to examine and manipulate the contents of messages. 
// -// -// Protocol Buffer Descriptors +// # Protocol Buffer Descriptors // // Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) // are immutable objects that represent protobuf type information. @@ -26,8 +25,7 @@ // The "google.golang.org/protobuf/reflect/protodesc" package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // -// -// Go Type Descriptors +// # Go Type Descriptors // // A type descriptor (e.g., EnumType or MessageType) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. @@ -41,8 +39,7 @@ // The "google.golang.org/protobuf/types/dynamicpb" package can be used to // create Go type descriptors from protobuf descriptors. // -// -// Value Interfaces +// # Value Interfaces // // The Enum and Message interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve @@ -55,13 +52,11 @@ // The "github.com/golang/protobuf/proto".MessageReflect function can be used // to obtain a reflective view on older messages. // -// -// Relationships +// # Relationships // // The following diagrams demonstrate the relationships between // various types declared in this package. // -// // ┌───────────────────────────────────┐ // V │ // ┌────────────── New(n) ─────────────┐ │ @@ -83,7 +78,6 @@ // // • An Enum is a concrete enum instance. Generated enums implement Enum. // -// // ┌──────────────── New() ─────────────────┐ // │ │ // │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐ @@ -98,12 +92,22 @@ // // • A MessageType describes a concrete Go message type. // It has a MessageDescriptor and can construct a Message instance. +// Just as how Go's reflect.Type is a reflective description of a Go type, +// a MessageType is a reflective description of a Go type for a protobuf message. // // • A MessageDescriptor describes an abstract protobuf message type. -// -// • A Message is a concrete message instance. Generated messages implement -// ProtoMessage, which can convert to/from a Message. -// +// It has no understanding of Go types. In order to construct a MessageType +// from just a MessageDescriptor, you can consider looking up the message type +// in the global registry using protoregistry.GlobalTypes.FindMessageByName +// or constructing a dynamic MessageType using dynamicpb.NewMessageType. +// +// • A Message is a reflective view over a concrete message instance. +// Generated messages implement ProtoMessage, which can convert to a Message. +// Just as how Go's reflect.Value is a reflective view over a Go value, +// a Message is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the ProtoReflect method is similar to +// calling reflect.ValueOf, and the Message.Interface method is similar to +// calling reflect.Value.Interface. // // ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ // │ V │ V diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go index 121ba3a07bba9..0b99428855fe9 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -87,6 +87,7 @@ func (p1 SourcePath) Equal(p2 SourcePath) bool { // in a future version of this module. 
// // Example output: +// // .message_type[6].nested_type[15].field[3] func (p SourcePath) String() string { b := p.appendFileDescriptorProto(nil) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 8e53c44a9188a..3867470d30ac0 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -480,6 +480,7 @@ type ExtensionDescriptors interface { // relative to the parent that it is declared within. // // For example: +// // syntax = "proto2"; // package example; // message FooMessage { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index eb7764c307c05..ca8e28c5bc8b9 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -50,6 +50,7 @@ import ( // always references the source object. // // For example: +// // // Append a 0 to a "repeated int32" field. // // Since the Value returned by Mutable is guaranteed to alias // // the source message, modifying the Value modifies the message. @@ -392,6 +393,7 @@ func (v Value) MapKey() MapKey { // ╚═════════╧═════════════════════════════════════╝ // // A MapKey is constructed and accessed through a Value: +// // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 59f024c444fc8..58352a6978bee 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -30,9 +30,11 @@ import ( // conflictPolicy configures the policy for handling registration conflicts. // // It can be over-written at compile time with a linker-initialized variable: +// // go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" // // It can be over-written at program execution with an environment variable: +// // GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main // // Neither of the above are covered by the compatibility promise and diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go index ff094e1ba44b2..a105cb23e0331 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go @@ -26,16 +26,19 @@ const ( // EnforceVersion is used by code generated by protoc-gen-go // to statically enforce minimum and maximum versions of this package. // A compilation failure implies either that: -// * the runtime package is too old and needs to be updated OR -// * the generated code is too old and needs to be regenerated. +// - the runtime package is too old and needs to be updated OR +// - the generated code is too old and needs to be regenerated. // // The runtime package can be upgraded by running: +// // go get google.golang.org/protobuf // // The generated code can be regenerated by running: +// // protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES} // // Example usage by generated code: +// // const ( // // Verify that this generated code is sufficiently up-to-date. 
// _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion) @@ -49,6 +52,7 @@ const ( type EnforceVersion uint // This enforces the following invariant: +// // MinVersion ≤ GenVersion ≤ MaxVersion const ( _ = EnforceVersion(GenVersion - MinVersion) diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index 7f94443d26996..1b2085d469050 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -394,7 +394,7 @@ func numValidPaths(m proto.Message, paths []string) int { // Identify the next message to search within. md = fd.Message() // may be nil - // Repeated fields are only allowed at the last postion. + // Repeated fields are only allowed at the last position. if fd.IsList() || fd.IsMap() { md = nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index dfd66fc161ec1..34480f4d88c02 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,35 +1,47 @@ -# cloud.google.com/go v0.100.2 -## explicit; go 1.11 +# cloud.google.com/go v0.107.0 +## explicit; go 1.19 cloud.google.com/go cloud.google.com/go/internal cloud.google.com/go/internal/optional +cloud.google.com/go/internal/pubsub cloud.google.com/go/internal/testutil cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -cloud.google.com/go/longrunning -cloud.google.com/go/longrunning/autogen # cloud.google.com/go/bigtable v1.3.0 ## explicit; go 1.11 cloud.google.com/go/bigtable cloud.google.com/go/bigtable/bttest cloud.google.com/go/bigtable/internal/option -# cloud.google.com/go/compute v1.6.1 -## explicit; go 1.15 +# cloud.google.com/go/compute v1.14.0 +## explicit; go 1.19 +cloud.google.com/go/compute/internal +# cloud.google.com/go/compute/metadata v0.2.3 +## explicit; go 1.19 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v0.1.0 -## explicit; go 1.11 +# cloud.google.com/go/iam v0.8.0 +## explicit; go 1.19 cloud.google.com/go/iam -# cloud.google.com/go/kms v1.0.0 -## explicit; go 1.16 -# cloud.google.com/go/pubsub v1.3.1 -## explicit; go 1.11 +cloud.google.com/go/iam/apiv1/iampb +# cloud.google.com/go/longrunning v0.3.0 +## explicit; go 1.19 +cloud.google.com/go/longrunning +cloud.google.com/go/longrunning/autogen +cloud.google.com/go/longrunning/autogen/longrunningpb +# cloud.google.com/go/pubsub v1.27.1 +## explicit; go 1.19 cloud.google.com/go/pubsub cloud.google.com/go/pubsub/apiv1 +cloud.google.com/go/pubsub/apiv1/pubsubpb +cloud.google.com/go/pubsub/internal cloud.google.com/go/pubsub/internal/distribution +cloud.google.com/go/pubsub/internal/scheduler cloud.google.com/go/pubsub/pstest -# cloud.google.com/go/storage v1.10.0 -## explicit; go 1.11 +# cloud.google.com/go/storage v1.29.0 +## explicit; go 1.19 cloud.google.com/go/storage +cloud.google.com/go/storage/internal +cloud.google.com/go/storage/internal/apiv2 +cloud.google.com/go/storage/internal/apiv2/stubs # github.com/Azure/azure-pipeline-go v0.2.3 ## explicit; go 1.14 github.com/Azure/azure-pipeline-go/pipeline @@ -463,7 +475,7 @@ github.com/golang/snappy # github.com/google/btree v1.0.1 ## explicit; go 1.12 github.com/google/btree -# github.com/google/go-cmp v0.5.8 +# github.com/google/go-cmp v0.5.9 ## explicit; go 1.13 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff @@ -483,11 +495,15 @@ github.com/google/pprof/profile # github.com/google/renameio/v2 v2.0.0 ## explicit; go 
1.13 github.com/google/renameio/v2 -# github.com/google/uuid v1.2.0 +# github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid -# github.com/googleapis/gax-go/v2 v2.4.0 -## explicit; go 1.15 +# github.com/googleapis/enterprise-certificate-proxy v0.2.1 +## explicit; go 1.19 +github.com/googleapis/enterprise-certificate-proxy/client +github.com/googleapis/enterprise-certificate-proxy/client/util +# github.com/googleapis/gax-go/v2 v2.7.0 +## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto @@ -984,10 +1000,10 @@ github.com/spf13/cast # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/stretchr/objx v0.2.0 +# github.com/stretchr/objx v0.5.0 ## explicit; go 1.12 github.com/stretchr/objx -# github.com/stretchr/testify v1.7.2 +# github.com/stretchr/testify v1.8.1 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/mock @@ -1113,7 +1129,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/x/bsonx/bsoncore -# go.opencensus.io v0.23.0 +# go.opencensus.io v0.24.0 ## explicit; go 1.13 go.opencensus.io go.opencensus.io/internal @@ -1198,10 +1214,10 @@ golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/scrypt golang.org/x/crypto/sha3 -# golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 +# golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 ## explicit; go 1.17 golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20220706163947-c90051bbdb60 +# golang.org/x/net v0.0.0-20221014081412-f15817d10f9b ## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/context @@ -1221,8 +1237,8 @@ golang.org/x/net/netutil golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 -## explicit; go 1.11 +# golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 +## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/clientcredentials @@ -1231,11 +1247,11 @@ golang.org/x/oauth2/google/internal/externalaccount golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f +# golang.org/x/sync v0.1.0 ## explicit golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b +# golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -1248,7 +1264,7 @@ golang.org/x/sys/windows/svc/eventlog # golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 ## explicit; go 1.17 golang.org/x/term -# golang.org/x/text v0.3.7 +# golang.org/x/text v0.5.0 ## explicit; go 1.17 golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -1267,14 +1283,15 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 +# golang.org/x/time v0.1.0 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.1.10 -## explicit; go 1.17 +# golang.org/x/tools v0.1.12 +## explicit; go 1.18 golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/gcimporter golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/go/internal/pkgbits golang.org/x/tools/go/packages golang.org/x/tools/internal/event 
golang.org/x/tools/internal/event/core @@ -1284,16 +1301,18 @@ golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal -# golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df +# golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 ## explicit; go 1.17 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.83.0 -## explicit; go 1.15 +# google.golang.org/api v0.106.0 +## explicit; go 1.19 google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/compute/v1 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport +google.golang.org/api/iamcredentials/v1 +google.golang.org/api/impersonate google.golang.org/api/internal google.golang.org/api/internal/gensupport google.golang.org/api/internal/impersonate @@ -1303,6 +1322,7 @@ google.golang.org/api/option google.golang.org/api/option/internaloption google.golang.org/api/storage/v1 google.golang.org/api/support/bundler +google.golang.org/api/transport google.golang.org/api/transport/cert google.golang.org/api/transport/grpc google.golang.org/api/transport/http @@ -1322,8 +1342,9 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 -## explicit; go 1.15 +# google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f +## explicit; go 1.19 +google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/bigtable/admin/v2 google.golang.org/genproto/googleapis/bigtable/v2 @@ -1333,9 +1354,10 @@ google.golang.org/genproto/googleapis/pubsub/v1 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status +google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.47.0 => google.golang.org/grpc v1.45.0 +# google.golang.org/grpc v1.51.0 => google.golang.org/grpc v1.45.0 ## explicit; go 1.14 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1398,7 +1420,7 @@ google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap google.golang.org/grpc/test/bufconn -# google.golang.org/protobuf v1.28.0 +# google.golang.org/protobuf v1.28.1 ## explicit; go 1.11 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext