From 75613ef97b51494ce68b8a6d300562c69756b8be Mon Sep 17 00:00:00 2001 From: Emad Mohamadi Date: Fri, 24 Jun 2022 13:17:11 +0200 Subject: [PATCH 01/11] Catch up with carbonapi_v2 grpc protocol --- go.mod | 12 +- go.sum | 41 +- .../carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go | 125 ++ .../carbonapi_v2_grpc/carbonapi_v2_grpc.proto | 16 + .../carbonapi_v2_grpc_vtproto.pb.go | 375 +++++ .../protocol/carbonapi_v2_grpc/gen.go | 13 + .../carbonapi_v2_pb/carbonapi_v2_pb.pb.go | 712 +++++++-- .../carbonapi_v2_pb/carbonapi_v2_pb.proto | 48 +- .../carbonapi_v2_pb_vtproto.pb.go | 1372 +++++++++++++++-- .../carbonapi_v3_pb/carbonapi_v3_pb.pb.go | 221 +-- .../carbonapi_v3_pb_vtproto.pb.go | 572 ++++++- .../golang/protobuf/jsonpb/decode.go | 524 +++++++ .../golang/protobuf/jsonpb/encode.go | 559 +++++++ .../github.com/golang/protobuf/jsonpb/json.go | 69 + vendor/github.com/google/go-cmp/cmp/path.go | 2 +- .../google/go-cmp/cmp/report_slices.go | 202 ++- .../testify/assert/assertion_compare.go | 172 ++- .../testify/assert/assertion_format.go | 97 ++ .../testify/assert/assertion_forward.go | 194 +++ .../testify/assert/assertion_order.go | 81 + .../stretchr/testify/assert/assertions.go | 83 +- vendor/google.golang.org/grpc/.travis.yml | 42 - vendor/google.golang.org/grpc/CONTRIBUTING.md | 7 +- vendor/google.golang.org/grpc/MAINTAINERS.md | 5 +- vendor/google.golang.org/grpc/Makefile | 2 - vendor/google.golang.org/grpc/NOTICE.txt | 13 + vendor/google.golang.org/grpc/README.md | 2 +- .../grpc/attributes/attributes.go | 80 +- .../grpc/balancer/balancer.go | 100 +- .../grpc/balancer/base/balancer.go | 80 +- .../grpc_lb_v1/load_balancer_grpc.pb.go | 2 +- .../grpc/balancer/grpclb/grpclb.go | 40 +- .../grpc/balancer/grpclb/grpclb_config.go | 1 + .../balancer/grpclb/grpclb_remote_balancer.go | 63 +- .../grpc/balancer/grpclb/state/state.go | 2 +- .../grpc/balancer/roundrobin/roundrobin.go | 4 +- .../grpc/balancer_conn_wrappers.go | 312 +++- .../grpc/channelz/channelz.go | 36 + vendor/google.golang.org/grpc/clientconn.go | 879 ++++++----- .../grpc/connectivity/connectivity.go | 35 +- .../internal/handshaker/service/service.go | 3 +- .../proto/grpc_gcp/handshaker_grpc.pb.go | 2 +- .../grpc/credentials/credentials.go | 25 +- .../grpc/credentials/google/google.go | 71 +- .../grpc/credentials/google/xds.go | 7 +- .../grpc/credentials/insecure/insecure.go | 98 ++ .../grpc/credentials/oauth/oauth.go | 19 +- .../google.golang.org/grpc/credentials/tls.go | 3 + vendor/google.golang.org/grpc/dialoptions.go | 101 +- .../grpc/encoding/encoding.go | 2 +- .../grpc/grpclog/loggerv2.go | 94 +- vendor/google.golang.org/grpc/install_gae.sh | 6 - vendor/google.golang.org/grpc/interceptor.go | 9 +- .../balancer/gracefulswitch/gracefulswitch.go | 382 +++++ .../grpc/internal/binarylog/binarylog.go | 91 +- .../grpc/internal/binarylog/env_config.go | 6 +- .../grpc/internal/binarylog/method_logger.go | 26 +- .../grpc/internal/binarylog/sink.go | 41 +- .../grpc/internal/channelz/funcs.go | 240 +-- .../grpc/internal/channelz/id.go | 75 + .../grpc/internal/channelz/logging.go | 91 +- .../grpc/internal/channelz/types.go | 23 +- .../grpc/internal/channelz/types_linux.go | 2 - .../grpc/internal/channelz/types_nonlinux.go | 5 +- .../grpc/internal/channelz/util_linux.go | 2 - .../grpc/internal/channelz/util_nonlinux.go | 3 +- .../grpc/internal/credentials/spiffe.go | 2 - .../grpc/internal/credentials/syscallconn.go | 2 - .../grpc/internal/credentials/util.go | 4 +- .../grpc/internal/envconfig/envconfig.go | 3 - .../grpc/internal/envconfig/xds.go | 101 
++ .../grpc/internal/googlecloud/googlecloud.go | 76 +- .../manufacturer.go} | 16 +- .../manufacturer_linux.go} | 18 +- .../googlecloud/manufacturer_windows.go | 50 + .../grpc/internal/grpclog/grpclog.go | 8 +- .../grpc/internal/grpcrand/grpcrand.go | 29 +- .../dns/go113.go => grpcutil/grpcutil.go} | 19 +- .../go12.go => internal/grpcutil/regex.go} | 21 +- .../grpc/internal/grpcutil/target.go | 89 -- .../grpc/internal/internal.go | 13 +- .../grpc/internal/metadata/metadata.go | 76 +- .../grpc/internal/pretty/pretty.go | 82 + .../grpc/internal/resolver/config_selector.go | 9 +- .../internal/resolver/dns/dns_resolver.go | 9 +- .../grpc/internal/resolver/unix/unix.go | 12 +- .../internal/serviceconfig/serviceconfig.go | 4 +- .../grpc/internal/status/status.go | 14 +- .../grpc/internal/syscall/syscall_linux.go | 2 - .../grpc/internal/syscall/syscall_nonlinux.go | 21 +- .../grpc/internal/transport/controlbuf.go | 38 +- .../grpc/internal/transport/flowcontrol.go | 4 +- .../grpc/internal/transport/handler_server.go | 3 +- .../grpc/internal/transport/http2_client.go | 299 +++- .../grpc/internal/transport/http2_server.go | 365 +++-- .../grpc/internal/transport/http_util.go | 224 +-- .../transport/networktype/networktype.go | 4 +- .../grpc/internal/transport/proxy.go | 4 +- .../grpc/internal/transport/transport.go | 23 +- .../grpc/internal/xds_handshake_cluster.go | 2 +- .../grpc/metadata/metadata.go | 80 +- .../google.golang.org/grpc/picker_wrapper.go | 12 +- vendor/google.golang.org/grpc/pickfirst.go | 135 +- .../reflection_grpc.pb.go | 2 +- .../grpc/reflection/serverreflection.go | 416 ++--- vendor/google.golang.org/grpc/regenerate.sh | 34 +- vendor/google.golang.org/grpc/resolver/map.go | 109 ++ .../grpc/resolver/resolver.go | 62 +- .../grpc/resolver_conn_wrapper.go | 33 +- vendor/google.golang.org/grpc/rpc_util.go | 43 +- vendor/google.golang.org/grpc/server.go | 205 +-- .../google.golang.org/grpc/service_config.go | 5 +- vendor/google.golang.org/grpc/stats/stats.go | 11 +- .../google.golang.org/grpc/status/status.go | 34 +- vendor/google.golang.org/grpc/stream.go | 329 ++-- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 34 +- .../cmd/protoc-gen-go/internal_gengo/main.go | 46 +- .../protobuf/compiler/protogen/protogen.go | 9 +- .../protobuf/encoding/protojson/decode.go | 665 ++++++++ .../protobuf/encoding/protojson/doc.go | 11 + .../protobuf/encoding/protojson/encode.go | 344 +++++ .../encoding/protojson/well_known_types.go | 889 +++++++++++ .../protobuf/encoding/prototext/decode.go | 3 - .../protobuf/encoding/protowire/wire.go | 19 +- .../protobuf/internal/encoding/json/decode.go | 340 ++++ .../internal/encoding/json/decode_number.go | 254 +++ .../internal/encoding/json/decode_string.go | 91 ++ .../internal/encoding/json/decode_token.go | 192 +++ .../protobuf/internal/encoding/json/encode.go | 276 ++++ .../protobuf/internal/encoding/text/decode.go | 2 +- .../protobuf/internal/encoding/text/encode.go | 5 + .../protobuf/internal/errors/is_go112.go | 1 + .../protobuf/internal/errors/is_go113.go | 1 + .../internal/flags/proto_legacy_disable.go | 1 + .../internal/flags/proto_legacy_enable.go | 1 + .../protobuf/internal/impl/codec_map_go111.go | 1 + .../protobuf/internal/impl/codec_map_go112.go | 1 + .../protobuf/internal/impl/codec_reflect.go | 1 + .../protobuf/internal/impl/codec_unsafe.go | 1 + .../protobuf/internal/impl/decode.go | 8 + .../protobuf/internal/impl/legacy_message.go | 7 + .../protobuf/internal/impl/pointer_reflect.go | 1 + 
.../protobuf/internal/impl/pointer_unsafe.go | 1 + .../protobuf/internal/strs/strings_pure.go | 1 + .../protobuf/internal/strs/strings_unsafe.go | 1 + .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 17 +- .../protobuf/proto/proto_methods.go | 1 + .../protobuf/proto/proto_reflect.go | 1 + .../protobuf/reflect/protoreflect/methods.go | 1 + .../reflect/protoreflect/value_pure.go | 1 + .../reflect/protoreflect/value_union.go | 25 + .../reflect/protoreflect/value_unsafe.go | 1 + .../reflect/protoregistry/registry.go | 43 +- .../protobuf/runtime/protoiface/methods.go | 1 + .../types/descriptorpb/descriptor.pb.go | 82 - vendor/modules.txt | 24 +- 158 files changed, 12135 insertions(+), 3051 deletions(-) create mode 100644 vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go create mode 100644 vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto create mode 100644 vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go create mode 100644 vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/gen.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/decode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/encode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/json.go create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_order.go delete mode 100644 vendor/google.golang.org/grpc/.travis.yml create mode 100644 vendor/google.golang.org/grpc/NOTICE.txt create mode 100644 vendor/google.golang.org/grpc/channelz/channelz.go create mode 100644 vendor/google.golang.org/grpc/credentials/insecure/insecure.go delete mode 100644 vendor/google.golang.org/grpc/install_gae.sh create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/id.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/xds.go rename vendor/google.golang.org/grpc/internal/{credentials/syscallconn_appengine.go => googlecloud/manufacturer.go} (72%) rename vendor/google.golang.org/grpc/internal/{credentials/spiffe_appengine.go => googlecloud/manufacturer_linux.go} (71%) create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go rename vendor/google.golang.org/grpc/internal/{resolver/dns/go113.go => grpcutil/grpcutil.go} (67%) rename vendor/google.golang.org/grpc/{credentials/go12.go => internal/grpcutil/regex.go} (58%) delete mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/target.go create mode 100644 vendor/google.golang.org/grpc/internal/pretty/pretty.go create mode 100644 vendor/google.golang.org/grpc/resolver/map.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/encode.go diff --git 
a/go.mod b/go.mod index ccd5f2d98..2ce596598 100644 --- a/go.mod +++ b/go.mod @@ -13,10 +13,10 @@ require ( github.com/dgryski/httputil v0.0.0-20160116060654-189c2918cd08 github.com/go-graphite/carbonzipper v0.0.0-20180329125635-fedce067a794 github.com/go-graphite/go-whisper v0.0.0-20220504075317-5035f072cad3 - github.com/go-graphite/protocol v1.0.1-0.20210605224534-ac8ad6ab1f97 + github.com/go-graphite/protocol v1.0.1-0.20220623155218-0727176fe6f6 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect - github.com/google/go-cmp v0.5.5 + github.com/google/go-cmp v0.5.6 github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect github.com/klauspost/compress v1.4.0 github.com/lomik/graphite-pickle v0.0.0-20171221213606-614e8df42119 @@ -25,7 +25,7 @@ require ( github.com/lomik/zapwriter v0.0.0-20180906104450-2ec2b9a61680 github.com/prometheus/client_golang v0.9.1 github.com/sevlyar/go-daemon v0.1.6-0.20210508104436-15bb82c8ea3c - github.com/stretchr/testify v1.6.1 + github.com/stretchr/testify v1.7.0 github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d github.com/vmihailenco/msgpack/v5 v5.3.5 go.opentelemetry.io/otel v0.9.0 @@ -33,7 +33,7 @@ require ( go.uber.org/zap v1.9.1 golang.org/x/net v0.0.0-20210525063256-abc453219eb5 google.golang.org/api v0.29.0 - google.golang.org/grpc v1.38.0 + google.golang.org/grpc v1.47.0 ) require ( @@ -46,7 +46,7 @@ require ( github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect - github.com/golang/protobuf v1.5.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 // indirect @@ -75,7 +75,7 @@ require ( golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.5 // indirect google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect - google.golang.org/protobuf v1.26.0 // indirect + google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect honnef.co/go/tools v0.0.1-2020.1.3 // indirect ) diff --git a/go.sum b/go.sum index 0057c4bff..99737f282 100644 --- a/go.sum +++ b/go.sum @@ -38,18 +38,24 @@ github.com/Shopify/sarama v1.17.0 h1:Y2/FBwElFVwt7aLKL3fDG6hh+rrlywR6uLgTgKObwTc github.com/Shopify/sarama v1.17.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 
v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -68,11 +74,13 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -80,8 +88,8 @@ github.com/go-graphite/carbonzipper v0.0.0-20180329125635-fedce067a794 h1:9N+1I8 github.com/go-graphite/carbonzipper v0.0.0-20180329125635-fedce067a794/go.mod h1:sfZ+AkP8/bBcWSGVqhseV6t6e/+C0i/PkTpA32W5rXs= github.com/go-graphite/go-whisper v0.0.0-20220504075317-5035f072cad3 h1:QlegEOONT3wkw4c8/dIwGeAahnHonvJ1MaUUzPviZxk= github.com/go-graphite/go-whisper v0.0.0-20220504075317-5035f072cad3/go.mod h1:XnU8q9X8C3OhD8R7gAlRLI6VXU2KMwqgTO9Tshub2qE= 
-github.com/go-graphite/protocol v1.0.1-0.20210605224534-ac8ad6ab1f97 h1:ACknzcx/Tfej2PMKtkPCgvtsXn5ofEz4XFPJajFP7Is= -github.com/go-graphite/protocol v1.0.1-0.20210605224534-ac8ad6ab1f97/go.mod h1:ZRdtrmvGQ9A4rJe5BTD8lMiZFG138rMWhdbFeUf98Gk= +github.com/go-graphite/protocol v1.0.1-0.20220623155218-0727176fe6f6 h1:o6x5biB/GOeCgdxgr1JrDHzKkj7jVLH1Mx/P293BLb0= +github.com/go-graphite/protocol v1.0.1-0.20220623155218-0727176fe6f6/go.mod h1:puWkW2DFZi46CaLY1rxYhkDiBDFlpaTDaMjcE/Cd9gw= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -106,8 +114,10 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -117,8 +127,9 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -131,6 +142,7 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -188,6 +200,7 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFd github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 h1:nkcn14uNmFEuGCb2mBZbBb24RdNRL08b/wb+xBOYpuk= github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sevlyar/go-daemon v0.1.6-0.20210508104436-15bb82c8ea3c h1:qAf1dS4iPRD3Er73TL3HGHOSU6pGUG35rb9/9LY0wfE= github.com/sevlyar/go-daemon v0.1.6-0.20210508104436-15bb82c8ea3c/go.mod h1:6dJpPatBT9eUwM5VCw9Bt6CdX9Tk6UWvhW3MebLDRKE= @@ -195,8 +208,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d h1:4J9HCZVpvDmj2tiKGSTUnb3Ok/9CEQb9oqu9LHKQQpc= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= @@ -215,6 +229,7 @@ go.opentelemetry.io/otel v0.9.0 h1:nsdCDHzQx1Yv8E2nwCPcMXMfg+EMIlx1LBOXNC8qSQ8= go.opentelemetry.io/otel v0.9.0/go.mod h1:ckxzUEfk7tAkTwEMVdkllBM+YOfE/K9iwg6zYntFYSg= go.opentelemetry.io/otel/exporters/trace/jaeger v0.9.0 h1:DAbsxIO/OT7D8YaYlVzyrIGus33n9Z5oXqyrLa2UkcI= go.opentelemetry.io/otel/exporters/trace/jaeger v0.9.0/go.mod h1:WjKtt2IbPRRzJaBjixVvj68/zkPX2wuhzjGv8cGVzlA= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= @@ -278,6 +293,7 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo= golang.org/x/net 
v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -322,6 +338,7 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -409,6 +426,7 @@ google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4 google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -421,8 +439,10 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -433,8 +453,10 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf 
v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= @@ -444,6 +466,7 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go new file mode 100644 index 000000000..4c34853e1 --- /dev/null +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go @@ -0,0 +1,125 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.15.8 +// source: carbonapi_v2_grpc.proto + +package carbonapi_v2_grpc + +import ( + carbonapi_v2_pb "github.com/go-graphite/protocol/carbonapi_v2_pb" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_carbonapi_v2_grpc_proto protoreflect.FileDescriptor + +var file_carbonapi_v2_grpc_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, 0x61, 0x72, 0x62, 0x6f, + 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x1a, 0x1b, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2d, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x74, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, + 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, + 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x32, 0xcb, 0x03, 0x0a, 0x08, 0x43, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x56, 0x32, 0x12, 0x50, 0x0a, + 0x06, 0x52, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, + 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x61, + 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x65, + 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x47, 0x0a, 0x04, 0x46, 0x69, 0x6e, 0x64, 0x12, 0x1c, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, + 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x43, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, + 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1f, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, + 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x45, 0x0a, + 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, + 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x4d, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x28, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, + 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x33, + 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2d, + 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x74, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x67, + 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_carbonapi_v2_grpc_proto_goTypes = []interface{}{ + (*carbonapi_v2_pb.MultiFetchRequest)(nil), // 0: carbonapi_v2_pb.MultiFetchRequest + (*carbonapi_v2_pb.GlobRequest)(nil), // 1: carbonapi_v2_pb.GlobRequest + (*emptypb.Empty)(nil), // 2: google.protobuf.Empty + (*carbonapi_v2_pb.InfoRequest)(nil), // 3: carbonapi_v2_pb.InfoRequest + (*carbonapi_v2_pb.FetchResponse)(nil), // 4: carbonapi_v2_pb.FetchResponse + (*carbonapi_v2_pb.GlobResponse)(nil), // 5: carbonapi_v2_pb.GlobResponse + (*carbonapi_v2_pb.MetricResponse)(nil), // 6: carbonapi_v2_pb.MetricResponse + (*carbonapi_v2_pb.InfoResponse)(nil), // 7: carbonapi_v2_pb.InfoResponse + (*carbonapi_v2_pb.MetricDetailsResponse)(nil), // 8: carbonapi_v2_pb.MetricDetailsResponse + (*carbonapi_v2_pb.ProtocolVersionResponse)(nil), // 9: carbonapi_v2_pb.ProtocolVersionResponse +} +var file_carbonapi_v2_grpc_proto_depIdxs = []int32{ + 0, // 0: carbonapi_v2_grpc.CarbonV2.Render:input_type -> carbonapi_v2_pb.MultiFetchRequest + 1, // 1: carbonapi_v2_grpc.CarbonV2.Find:input_type -> carbonapi_v2_pb.GlobRequest + 2, // 2: carbonapi_v2_grpc.CarbonV2.List:input_type -> google.protobuf.Empty + 3, // 3: carbonapi_v2_grpc.CarbonV2.Info:input_type -> carbonapi_v2_pb.InfoRequest + 2, // 4: carbonapi_v2_grpc.CarbonV2.Stats:input_type -> google.protobuf.Empty + 2, // 5: carbonapi_v2_grpc.CarbonV2.Version:input_type -> google.protobuf.Empty + 4, // 6: carbonapi_v2_grpc.CarbonV2.Render:output_type -> carbonapi_v2_pb.FetchResponse + 5, // 7: carbonapi_v2_grpc.CarbonV2.Find:output_type -> carbonapi_v2_pb.GlobResponse + 6, // 8: carbonapi_v2_grpc.CarbonV2.List:output_type -> carbonapi_v2_pb.MetricResponse + 7, // 9: carbonapi_v2_grpc.CarbonV2.Info:output_type -> carbonapi_v2_pb.InfoResponse + 8, // 10: carbonapi_v2_grpc.CarbonV2.Stats:output_type -> carbonapi_v2_pb.MetricDetailsResponse + 9, // 11: carbonapi_v2_grpc.CarbonV2.Version:output_type -> carbonapi_v2_pb.ProtocolVersionResponse + 6, // [6:12] is the sub-list for method output_type + 0, // [0:6] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_carbonapi_v2_grpc_proto_init() } +func file_carbonapi_v2_grpc_proto_init() { + if File_carbonapi_v2_grpc_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_carbonapi_v2_grpc_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_carbonapi_v2_grpc_proto_goTypes, + DependencyIndexes: file_carbonapi_v2_grpc_proto_depIdxs, + }.Build() + File_carbonapi_v2_grpc_proto = out.File + file_carbonapi_v2_grpc_proto_rawDesc = nil 
+ file_carbonapi_v2_grpc_proto_goTypes = nil + file_carbonapi_v2_grpc_proto_depIdxs = nil +} diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto new file mode 100644 index 000000000..55aff7cc2 --- /dev/null +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package carbonapi_v2_grpc; + +option go_package = "github.com/go-graphite/protocol/carbonapi_v2_grpc"; + +import "google/protobuf/empty.proto"; +import "github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.proto"; + +service CarbonV2 { + rpc Render(carbonapi_v2_pb.MultiFetchRequest) returns (stream carbonapi_v2_pb.FetchResponse) {} + rpc Find(carbonapi_v2_pb.GlobRequest) returns (stream carbonapi_v2_pb.GlobResponse) {} + rpc List(google.protobuf.Empty) returns (stream carbonapi_v2_pb.MetricResponse) {} + rpc Info(carbonapi_v2_pb.InfoRequest) returns (carbonapi_v2_pb.InfoResponse) {} + rpc Stats(google.protobuf.Empty) returns (carbonapi_v2_pb.MetricDetailsResponse) {} + rpc Version(google.protobuf.Empty) returns (carbonapi_v2_pb.ProtocolVersionResponse) {} +} diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go new file mode 100644 index 000000000..fd79a9f13 --- /dev/null +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go @@ -0,0 +1,375 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.3.0 +// source: carbonapi_v2_grpc.proto + +package carbonapi_v2_grpc + +import ( + context "context" + carbonapi_v2_pb "github.com/go-graphite/protocol/carbonapi_v2_pb" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// CarbonV2Client is the client API for CarbonV2 service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type CarbonV2Client interface { + Render(ctx context.Context, in *carbonapi_v2_pb.MultiFetchRequest, opts ...grpc.CallOption) (CarbonV2_RenderClient, error) + Find(ctx context.Context, in *carbonapi_v2_pb.GlobRequest, opts ...grpc.CallOption) (CarbonV2_FindClient, error) + List(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (CarbonV2_ListClient, error) + Info(ctx context.Context, in *carbonapi_v2_pb.InfoRequest, opts ...grpc.CallOption) (*carbonapi_v2_pb.InfoResponse, error) + Stats(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*carbonapi_v2_pb.MetricDetailsResponse, error) + Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*carbonapi_v2_pb.ProtocolVersionResponse, error) +} + +type carbonV2Client struct { + cc grpc.ClientConnInterface +} + +func NewCarbonV2Client(cc grpc.ClientConnInterface) CarbonV2Client { + return &carbonV2Client{cc} +} + +func (c *carbonV2Client) Render(ctx context.Context, in *carbonapi_v2_pb.MultiFetchRequest, opts ...grpc.CallOption) (CarbonV2_RenderClient, error) { + stream, err := c.cc.NewStream(ctx, &CarbonV2_ServiceDesc.Streams[0], "/carbonapi_v2_grpc.CarbonV2/Render", opts...) + if err != nil { + return nil, err + } + x := &carbonV2RenderClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type CarbonV2_RenderClient interface { + Recv() (*carbonapi_v2_pb.FetchResponse, error) + grpc.ClientStream +} + +type carbonV2RenderClient struct { + grpc.ClientStream +} + +func (x *carbonV2RenderClient) Recv() (*carbonapi_v2_pb.FetchResponse, error) { + m := new(carbonapi_v2_pb.FetchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *carbonV2Client) Find(ctx context.Context, in *carbonapi_v2_pb.GlobRequest, opts ...grpc.CallOption) (CarbonV2_FindClient, error) { + stream, err := c.cc.NewStream(ctx, &CarbonV2_ServiceDesc.Streams[1], "/carbonapi_v2_grpc.CarbonV2/Find", opts...) + if err != nil { + return nil, err + } + x := &carbonV2FindClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type CarbonV2_FindClient interface { + Recv() (*carbonapi_v2_pb.GlobResponse, error) + grpc.ClientStream +} + +type carbonV2FindClient struct { + grpc.ClientStream +} + +func (x *carbonV2FindClient) Recv() (*carbonapi_v2_pb.GlobResponse, error) { + m := new(carbonapi_v2_pb.GlobResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *carbonV2Client) List(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (CarbonV2_ListClient, error) { + stream, err := c.cc.NewStream(ctx, &CarbonV2_ServiceDesc.Streams[2], "/carbonapi_v2_grpc.CarbonV2/List", opts...) 
+ if err != nil { + return nil, err + } + x := &carbonV2ListClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type CarbonV2_ListClient interface { + Recv() (*carbonapi_v2_pb.MetricResponse, error) + grpc.ClientStream +} + +type carbonV2ListClient struct { + grpc.ClientStream +} + +func (x *carbonV2ListClient) Recv() (*carbonapi_v2_pb.MetricResponse, error) { + m := new(carbonapi_v2_pb.MetricResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *carbonV2Client) Info(ctx context.Context, in *carbonapi_v2_pb.InfoRequest, opts ...grpc.CallOption) (*carbonapi_v2_pb.InfoResponse, error) { + out := new(carbonapi_v2_pb.InfoResponse) + err := c.cc.Invoke(ctx, "/carbonapi_v2_grpc.CarbonV2/Info", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *carbonV2Client) Stats(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*carbonapi_v2_pb.MetricDetailsResponse, error) { + out := new(carbonapi_v2_pb.MetricDetailsResponse) + err := c.cc.Invoke(ctx, "/carbonapi_v2_grpc.CarbonV2/Stats", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *carbonV2Client) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*carbonapi_v2_pb.ProtocolVersionResponse, error) { + out := new(carbonapi_v2_pb.ProtocolVersionResponse) + err := c.cc.Invoke(ctx, "/carbonapi_v2_grpc.CarbonV2/Version", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CarbonV2Server is the server API for CarbonV2 service. +// All implementations must embed UnimplementedCarbonV2Server +// for forward compatibility +type CarbonV2Server interface { + Render(*carbonapi_v2_pb.MultiFetchRequest, CarbonV2_RenderServer) error + Find(*carbonapi_v2_pb.GlobRequest, CarbonV2_FindServer) error + List(*emptypb.Empty, CarbonV2_ListServer) error + Info(context.Context, *carbonapi_v2_pb.InfoRequest) (*carbonapi_v2_pb.InfoResponse, error) + Stats(context.Context, *emptypb.Empty) (*carbonapi_v2_pb.MetricDetailsResponse, error) + Version(context.Context, *emptypb.Empty) (*carbonapi_v2_pb.ProtocolVersionResponse, error) + mustEmbedUnimplementedCarbonV2Server() +} + +// UnimplementedCarbonV2Server must be embedded to have forward compatible implementations. 
+type UnimplementedCarbonV2Server struct { +} + +func (UnimplementedCarbonV2Server) Render(*carbonapi_v2_pb.MultiFetchRequest, CarbonV2_RenderServer) error { + return status.Errorf(codes.Unimplemented, "method Render not implemented") +} +func (UnimplementedCarbonV2Server) Find(*carbonapi_v2_pb.GlobRequest, CarbonV2_FindServer) error { + return status.Errorf(codes.Unimplemented, "method Find not implemented") +} +func (UnimplementedCarbonV2Server) List(*emptypb.Empty, CarbonV2_ListServer) error { + return status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedCarbonV2Server) Info(context.Context, *carbonapi_v2_pb.InfoRequest) (*carbonapi_v2_pb.InfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} +func (UnimplementedCarbonV2Server) Stats(context.Context, *emptypb.Empty) (*carbonapi_v2_pb.MetricDetailsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stats not implemented") +} +func (UnimplementedCarbonV2Server) Version(context.Context, *emptypb.Empty) (*carbonapi_v2_pb.ProtocolVersionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") +} +func (UnimplementedCarbonV2Server) mustEmbedUnimplementedCarbonV2Server() {} + +// UnsafeCarbonV2Server may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CarbonV2Server will +// result in compilation errors. +type UnsafeCarbonV2Server interface { + mustEmbedUnimplementedCarbonV2Server() +} + +func RegisterCarbonV2Server(s grpc.ServiceRegistrar, srv CarbonV2Server) { + s.RegisterService(&CarbonV2_ServiceDesc, srv) +} + +func _CarbonV2_Render_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(carbonapi_v2_pb.MultiFetchRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(CarbonV2Server).Render(m, &carbonV2RenderServer{stream}) +} + +type CarbonV2_RenderServer interface { + Send(*carbonapi_v2_pb.FetchResponse) error + grpc.ServerStream +} + +type carbonV2RenderServer struct { + grpc.ServerStream +} + +func (x *carbonV2RenderServer) Send(m *carbonapi_v2_pb.FetchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _CarbonV2_Find_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(carbonapi_v2_pb.GlobRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(CarbonV2Server).Find(m, &carbonV2FindServer{stream}) +} + +type CarbonV2_FindServer interface { + Send(*carbonapi_v2_pb.GlobResponse) error + grpc.ServerStream +} + +type carbonV2FindServer struct { + grpc.ServerStream +} + +func (x *carbonV2FindServer) Send(m *carbonapi_v2_pb.GlobResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _CarbonV2_List_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(emptypb.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(CarbonV2Server).List(m, &carbonV2ListServer{stream}) +} + +type CarbonV2_ListServer interface { + Send(*carbonapi_v2_pb.MetricResponse) error + grpc.ServerStream +} + +type carbonV2ListServer struct { + grpc.ServerStream +} + +func (x *carbonV2ListServer) Send(m *carbonapi_v2_pb.MetricResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _CarbonV2_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(carbonapi_v2_pb.InfoRequest) + if err := 
dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CarbonV2Server).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/carbonapi_v2_grpc.CarbonV2/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CarbonV2Server).Info(ctx, req.(*carbonapi_v2_pb.InfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CarbonV2_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CarbonV2Server).Stats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/carbonapi_v2_grpc.CarbonV2/Stats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CarbonV2Server).Stats(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _CarbonV2_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CarbonV2Server).Version(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/carbonapi_v2_grpc.CarbonV2/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CarbonV2Server).Version(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// CarbonV2_ServiceDesc is the grpc.ServiceDesc for CarbonV2 service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CarbonV2_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "carbonapi_v2_grpc.CarbonV2", + HandlerType: (*CarbonV2Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Info", + Handler: _CarbonV2_Info_Handler, + }, + { + MethodName: "Stats", + Handler: _CarbonV2_Stats_Handler, + }, + { + MethodName: "Version", + Handler: _CarbonV2_Version_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Render", + Handler: _CarbonV2_Render_Handler, + ServerStreams: true, + }, + { + StreamName: "Find", + Handler: _CarbonV2_Find_Handler, + ServerStreams: true, + }, + { + StreamName: "List", + Handler: _CarbonV2_List_Handler, + ServerStreams: true, + }, + }, + Metadata: "carbonapi_v2_grpc.proto", +} diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/gen.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/gen.go new file mode 100644 index 000000000..6ad8965fc --- /dev/null +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/gen.go @@ -0,0 +1,13 @@ +package carbonapi_v2_grpc + +//go:generate mkdir -p github.com/go-graphite/protocol/ +//go:generate cp -r ../carbonapi_v2_pb github.com/go-graphite/protocol/ +//go:generate protoc --go_out=. --go-vtproto_out=. --go-vtproto_opt=features=all carbonapi_v2_grpc.proto --proto_path=. 
--proto_path=../ +//go:generate mv github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go ./ +//go:generate mv github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go ./ +//go:generate rm -r github.com/go-graphite/protocol/carbonapi_v2_grpc +//go:generate rm -r github.com/go-graphite/protocol/carbonapi_v2_pb +//go:generate rm -r github.com/go-graphite/protocol +//go:generate rm -r github.com/go-graphite +//go:generate rm -r github.com/ +// protoc --go_out=. --go-vtproto_out=. --go-vtproto_opt=features=marshal+unmarshal+size --go_opt="Mprotocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto=github.com/go-graphite/carbonapi_v2_grpc/carbonapi_v2_grpc;carbonapi_v2_grpc" carbonapi_v2_grpc.proto --proto_path=../vendor/ --proto_path=. --proto_path=../ diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.pb.go index 28e0d8d93..f98f0e776 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.pb.go +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel -// protoc v3.12.4 +// protoc-gen-go v1.28.0 +// protoc v3.15.8 // source: carbonapi_v2_pb.proto package carbonapi_v2_pb @@ -20,6 +20,116 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type FetchRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + StartTime int32 `protobuf:"varint,2,opt,name=startTime,proto3" json:"startTime,omitempty"` + StopTime int32 `protobuf:"varint,3,opt,name=stopTime,proto3" json:"stopTime,omitempty"` +} + +func (x *FetchRequest) Reset() { + *x = FetchRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_carbonapi_v2_pb_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FetchRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FetchRequest) ProtoMessage() {} + +func (x *FetchRequest) ProtoReflect() protoreflect.Message { + mi := &file_carbonapi_v2_pb_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FetchRequest.ProtoReflect.Descriptor instead. 
+func (*FetchRequest) Descriptor() ([]byte, []int) { + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{0} +} + +func (x *FetchRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *FetchRequest) GetStartTime() int32 { + if x != nil { + return x.StartTime + } + return 0 +} + +func (x *FetchRequest) GetStopTime() int32 { + if x != nil { + return x.StopTime + } + return 0 +} + +type MultiFetchRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Metrics []*FetchRequest `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty"` +} + +func (x *MultiFetchRequest) Reset() { + *x = MultiFetchRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_carbonapi_v2_pb_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MultiFetchRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MultiFetchRequest) ProtoMessage() {} + +func (x *MultiFetchRequest) ProtoReflect() protoreflect.Message { + mi := &file_carbonapi_v2_pb_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MultiFetchRequest.ProtoReflect.Descriptor instead. +func (*MultiFetchRequest) Descriptor() ([]byte, []int) { + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{1} +} + +func (x *MultiFetchRequest) GetMetrics() []*FetchRequest { + if x != nil { + return x.Metrics + } + return nil +} + type FetchResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -36,7 +146,7 @@ type FetchResponse struct { func (x *FetchResponse) Reset() { *x = FetchResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[0] + mi := &file_carbonapi_v2_pb_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -49,7 +159,7 @@ func (x *FetchResponse) String() string { func (*FetchResponse) ProtoMessage() {} func (x *FetchResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[0] + mi := &file_carbonapi_v2_pb_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -62,7 +172,7 @@ func (x *FetchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FetchResponse.ProtoReflect.Descriptor instead. 
func (*FetchResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{0} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{2} } func (x *FetchResponse) GetName() string { @@ -118,7 +228,7 @@ type MultiFetchResponse struct { func (x *MultiFetchResponse) Reset() { *x = MultiFetchResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[1] + mi := &file_carbonapi_v2_pb_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -131,7 +241,7 @@ func (x *MultiFetchResponse) String() string { func (*MultiFetchResponse) ProtoMessage() {} func (x *MultiFetchResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[1] + mi := &file_carbonapi_v2_pb_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -144,7 +254,7 @@ func (x *MultiFetchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MultiFetchResponse.ProtoReflect.Descriptor instead. func (*MultiFetchResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{1} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{3} } func (x *MultiFetchResponse) GetMetrics() []*FetchResponse { @@ -154,6 +264,53 @@ func (x *MultiFetchResponse) GetMetrics() []*FetchResponse { return nil } +type GlobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` +} + +func (x *GlobRequest) Reset() { + *x = GlobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_carbonapi_v2_pb_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GlobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GlobRequest) ProtoMessage() {} + +func (x *GlobRequest) ProtoReflect() protoreflect.Message { + mi := &file_carbonapi_v2_pb_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GlobRequest.ProtoReflect.Descriptor instead. +func (*GlobRequest) Descriptor() ([]byte, []int) { + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{4} +} + +func (x *GlobRequest) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + type GlobMatch struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -166,7 +323,7 @@ type GlobMatch struct { func (x *GlobMatch) Reset() { *x = GlobMatch{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[2] + mi := &file_carbonapi_v2_pb_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -179,7 +336,7 @@ func (x *GlobMatch) String() string { func (*GlobMatch) ProtoMessage() {} func (x *GlobMatch) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[2] + mi := &file_carbonapi_v2_pb_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -192,7 +349,7 @@ func (x *GlobMatch) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobMatch.ProtoReflect.Descriptor instead. 
func (*GlobMatch) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{2} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{5} } func (x *GlobMatch) GetPath() string { @@ -221,7 +378,7 @@ type GlobResponse struct { func (x *GlobResponse) Reset() { *x = GlobResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[3] + mi := &file_carbonapi_v2_pb_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -234,7 +391,7 @@ func (x *GlobResponse) String() string { func (*GlobResponse) ProtoMessage() {} func (x *GlobResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[3] + mi := &file_carbonapi_v2_pb_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -247,7 +404,7 @@ func (x *GlobResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobResponse.ProtoReflect.Descriptor instead. func (*GlobResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{3} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{6} } func (x *GlobResponse) GetName() string { @@ -264,6 +421,53 @@ func (x *GlobResponse) GetMatches() []*GlobMatch { return nil } +type InfoRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *InfoRequest) Reset() { + *x = InfoRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_carbonapi_v2_pb_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InfoRequest) ProtoMessage() {} + +func (x *InfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_carbonapi_v2_pb_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InfoRequest.ProtoReflect.Descriptor instead. +func (*InfoRequest) Descriptor() ([]byte, []int) { + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{7} +} + +func (x *InfoRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + type Retention struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -276,7 +480,7 @@ type Retention struct { func (x *Retention) Reset() { *x = Retention{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[4] + mi := &file_carbonapi_v2_pb_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -289,7 +493,7 @@ func (x *Retention) String() string { func (*Retention) ProtoMessage() {} func (x *Retention) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[4] + mi := &file_carbonapi_v2_pb_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -302,7 +506,7 @@ func (x *Retention) ProtoReflect() protoreflect.Message { // Deprecated: Use Retention.ProtoReflect.Descriptor instead. 
func (*Retention) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{4} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{8} } func (x *Retention) GetSecondsPerPoint() int32 { @@ -334,7 +538,7 @@ type InfoResponse struct { func (x *InfoResponse) Reset() { *x = InfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[5] + mi := &file_carbonapi_v2_pb_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -347,7 +551,7 @@ func (x *InfoResponse) String() string { func (*InfoResponse) ProtoMessage() {} func (x *InfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[5] + mi := &file_carbonapi_v2_pb_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -360,7 +564,7 @@ func (x *InfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use InfoResponse.ProtoReflect.Descriptor instead. func (*InfoResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{5} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{9} } func (x *InfoResponse) GetName() string { @@ -410,7 +614,7 @@ type ServerInfoResponse struct { func (x *ServerInfoResponse) Reset() { *x = ServerInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[6] + mi := &file_carbonapi_v2_pb_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -423,7 +627,7 @@ func (x *ServerInfoResponse) String() string { func (*ServerInfoResponse) ProtoMessage() {} func (x *ServerInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[6] + mi := &file_carbonapi_v2_pb_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -436,7 +640,7 @@ func (x *ServerInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerInfoResponse.ProtoReflect.Descriptor instead. func (*ServerInfoResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{6} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{10} } func (x *ServerInfoResponse) GetServer() string { @@ -464,7 +668,7 @@ type ZipperInfoResponse struct { func (x *ZipperInfoResponse) Reset() { *x = ZipperInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[7] + mi := &file_carbonapi_v2_pb_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -477,7 +681,7 @@ func (x *ZipperInfoResponse) String() string { func (*ZipperInfoResponse) ProtoMessage() {} func (x *ZipperInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[7] + mi := &file_carbonapi_v2_pb_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -490,7 +694,7 @@ func (x *ZipperInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ZipperInfoResponse.ProtoReflect.Descriptor instead. 
func (*ZipperInfoResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{7} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{11} } func (x *ZipperInfoResponse) GetResponses() []*ServerInfoResponse { @@ -511,7 +715,7 @@ type ListMetricsResponse struct { func (x *ListMetricsResponse) Reset() { *x = ListMetricsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[8] + mi := &file_carbonapi_v2_pb_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -524,7 +728,7 @@ func (x *ListMetricsResponse) String() string { func (*ListMetricsResponse) ProtoMessage() {} func (x *ListMetricsResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[8] + mi := &file_carbonapi_v2_pb_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -537,7 +741,7 @@ func (x *ListMetricsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListMetricsResponse.ProtoReflect.Descriptor instead. func (*ListMetricsResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{8} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{12} } func (x *ListMetricsResponse) GetMetrics() []string { @@ -547,6 +751,53 @@ func (x *ListMetricsResponse) GetMetrics() []string { return nil } +type MetricResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *MetricResponse) Reset() { + *x = MetricResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_carbonapi_v2_pb_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricResponse) ProtoMessage() {} + +func (x *MetricResponse) ProtoReflect() protoreflect.Message { + mi := &file_carbonapi_v2_pb_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricResponse.ProtoReflect.Descriptor instead. 
+func (*MetricResponse) Descriptor() ([]byte, []int) { + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{13} +} + +func (x *MetricResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + // MetricDetails and MetricDetailsResponse is not guaranteed to stay the same in future releases // But we'll do our best to make them stable and only to extend them if that's necessary type MetricDetails struct { @@ -563,7 +814,7 @@ type MetricDetails struct { func (x *MetricDetails) Reset() { *x = MetricDetails{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[9] + mi := &file_carbonapi_v2_pb_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -576,7 +827,7 @@ func (x *MetricDetails) String() string { func (*MetricDetails) ProtoMessage() {} func (x *MetricDetails) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[9] + mi := &file_carbonapi_v2_pb_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -589,7 +840,7 @@ func (x *MetricDetails) ProtoReflect() protoreflect.Message { // Deprecated: Use MetricDetails.ProtoReflect.Descriptor instead. func (*MetricDetails) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{9} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{14} } func (x *MetricDetails) GetSize() int64 { @@ -633,7 +884,7 @@ type MetricDetailsResponse struct { func (x *MetricDetailsResponse) Reset() { *x = MetricDetailsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[10] + mi := &file_carbonapi_v2_pb_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -646,7 +897,7 @@ func (x *MetricDetailsResponse) String() string { func (*MetricDetailsResponse) ProtoMessage() {} func (x *MetricDetailsResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[10] + mi := &file_carbonapi_v2_pb_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -659,7 +910,7 @@ func (x *MetricDetailsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MetricDetailsResponse.ProtoReflect.Descriptor instead. 
func (*MetricDetailsResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{10} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{15} } func (x *MetricDetailsResponse) GetMetrics() map[string]*MetricDetails { @@ -683,98 +934,166 @@ func (x *MetricDetailsResponse) GetTotalSpace() uint64 { return 0 } +type ProtocolVersionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *ProtocolVersionResponse) Reset() { + *x = ProtocolVersionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_carbonapi_v2_pb_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProtocolVersionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProtocolVersionResponse) ProtoMessage() {} + +func (x *ProtocolVersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_carbonapi_v2_pb_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProtocolVersionResponse.ProtoReflect.Descriptor instead. +func (*ProtocolVersionResponse) Descriptor() ([]byte, []int) { + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{16} +} + +func (x *ProtocolVersionResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + var File_carbonapi_v2_pb_proto protoreflect.FileDescriptor var file_carbonapi_v2_pb_proto_rawDesc = []byte{ 0x0a, 0x15, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, - 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x22, 0xad, 0x01, 0x0a, 0x0d, 0x46, 0x65, 0x74, - 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, - 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x73, 0x74, 0x6f, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, - 0x73, 0x74, 0x6f, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x74, 0x65, 0x70, - 0x54, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x74, 0x65, 0x70, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x01, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, - 0x69, 0x73, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x03, 0x28, 0x08, 0x52, 0x08, - 0x69, 0x73, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x22, 0x4e, 0x0a, 0x12, 0x4d, 0x75, 0x6c, 0x74, - 0x69, 0x46, 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, - 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, - 0x62, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, - 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 
0x73, 0x22, 0x37, 0x0a, 0x09, 0x47, 0x6c, 0x6f, 0x62, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x4c, - 0x65, 0x61, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x65, 0x61, - 0x66, 0x22, 0x58, 0x0a, 0x0c, 0x47, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, - 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x22, 0x5d, 0x0a, 0x09, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x65, 0x63, 0x6f, - 0x6e, 0x64, 0x73, 0x50, 0x65, 0x72, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x50, 0x65, 0x72, 0x50, 0x6f, 0x69, - 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x50, 0x6f, - 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x4f, 0x66, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22, 0xd4, 0x01, 0x0a, 0x0c, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x2c, 0x0a, 0x11, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x61, 0x67, 0x67, 0x72, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x22, 0x0a, - 0x0c, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x46, 0x61, 0x63, 0x74, 0x6f, - 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x46, - 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x3a, 0x0a, 0x0a, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x61, 0x72, 0x62, - 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x5f, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, - 0x31, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x22, 0x5c, 0x0a, 0x0c, 0x46, 0x65, 0x74, 0x63, + 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x05, 0x52, + 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x74, + 0x6f, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x74, + 0x6f, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x4c, 0x0a, 0x11, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x07, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, + 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x22, 0xad, 0x01, 0x0a, 0x0d, 0x46, 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x74, 0x6f, 0x70, + 0x54, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x74, 0x6f, 0x70, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x74, 0x65, 0x70, 0x54, 0x69, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x74, 0x65, 0x70, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x01, + 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x41, 0x62, + 0x73, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x03, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x41, 0x62, + 0x73, 0x65, 0x6e, 0x74, 0x22, 0x4e, 0x0a, 0x12, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x46, 0x65, 0x74, + 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x61, + 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x65, + 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x22, 0x23, 0x0a, 0x0b, 0x47, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x09, 0x47, 0x6c, 0x6f, + 0x62, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, + 0x4c, 0x65, 0x61, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x65, + 0x61, 0x66, 0x22, 0x58, 0x0a, 0x0c, 0x47, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, + 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x22, 0x21, 0x0a, 0x0b, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 
0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x5d, 0x0a, 0x09, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0f, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x50, 0x65, 0x72, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x50, 0x65, + 0x72, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x4f, 0x66, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22, 0xd4, + 0x01, 0x0a, 0x0c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x46, + 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x78, 0x46, 0x69, + 0x6c, 0x65, 0x73, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x3a, 0x0a, 0x0a, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x04, 0x69, 0x6e, - 0x66, 0x6f, 0x22, 0x57, 0x0a, 0x12, 0x5a, 0x69, 0x70, 0x70, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x61, - 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x22, 0x2f, 0x0a, 0x13, 0x4c, - 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x07, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x6b, 0x0a, 0x0d, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x53, 0x69, 0x7a, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x07, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x41, - 0x54, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x41, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x52, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x06, 0x52, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x80, 0x02, 0x0a, 0x15, 0x4d, 0x65, - 0x74, 0x72, 0x69, 
0x63, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, - 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x46, 0x72, 0x65, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x46, 0x72, 0x65, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x1e, 0x0a, 0x0a, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x70, 0x61, 0x63, 0x65, - 0x1a, 0x5a, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, - 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x31, 0x5a, 0x2f, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2d, 0x67, 0x72, - 0x61, 0x70, 0x68, 0x69, 0x74, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, - 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x5f, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, + 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x57, 0x0a, 0x12, 0x5a, 0x69, 0x70, 0x70, 0x65, 0x72, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x09, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, + 0x62, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x22, + 0x2f, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x22, 0x24, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 
0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x6b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x4d, + 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x4d, 0x6f, + 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x41, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x41, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x52, + 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x52, 0x64, 0x54, + 0x69, 0x6d, 0x65, 0x22, 0x80, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, + 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, + 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x1c, 0x0a, 0x09, + 0x46, 0x72, 0x65, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x09, 0x46, 0x72, 0x65, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x54, 0x6f, + 0x74, 0x61, 0x6c, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, + 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x70, 0x61, 0x63, 0x65, 0x1a, 0x5a, 0x0a, 0x0c, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x61, + 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x33, 0x0a, 0x17, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x31, 0x5a, 0x2f, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2d, 0x67, 0x72, 0x61, + 0x70, 0x68, 0x69, 0x74, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x63, + 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -789,34 +1108,41 @@ func file_carbonapi_v2_pb_proto_rawDescGZIP() []byte { return file_carbonapi_v2_pb_proto_rawDescData } -var file_carbonapi_v2_pb_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_carbonapi_v2_pb_proto_msgTypes = make([]protoimpl.MessageInfo, 18) var file_carbonapi_v2_pb_proto_goTypes = []interface{}{ - (*FetchResponse)(nil), // 0: carbonapi_v2_pb.FetchResponse - (*MultiFetchResponse)(nil), // 1: carbonapi_v2_pb.MultiFetchResponse - 
(*GlobMatch)(nil), // 2: carbonapi_v2_pb.GlobMatch - (*GlobResponse)(nil), // 3: carbonapi_v2_pb.GlobResponse - (*Retention)(nil), // 4: carbonapi_v2_pb.Retention - (*InfoResponse)(nil), // 5: carbonapi_v2_pb.InfoResponse - (*ServerInfoResponse)(nil), // 6: carbonapi_v2_pb.ServerInfoResponse - (*ZipperInfoResponse)(nil), // 7: carbonapi_v2_pb.ZipperInfoResponse - (*ListMetricsResponse)(nil), // 8: carbonapi_v2_pb.ListMetricsResponse - (*MetricDetails)(nil), // 9: carbonapi_v2_pb.MetricDetails - (*MetricDetailsResponse)(nil), // 10: carbonapi_v2_pb.MetricDetailsResponse - nil, // 11: carbonapi_v2_pb.MetricDetailsResponse.MetricsEntry + (*FetchRequest)(nil), // 0: carbonapi_v2_pb.FetchRequest + (*MultiFetchRequest)(nil), // 1: carbonapi_v2_pb.MultiFetchRequest + (*FetchResponse)(nil), // 2: carbonapi_v2_pb.FetchResponse + (*MultiFetchResponse)(nil), // 3: carbonapi_v2_pb.MultiFetchResponse + (*GlobRequest)(nil), // 4: carbonapi_v2_pb.GlobRequest + (*GlobMatch)(nil), // 5: carbonapi_v2_pb.GlobMatch + (*GlobResponse)(nil), // 6: carbonapi_v2_pb.GlobResponse + (*InfoRequest)(nil), // 7: carbonapi_v2_pb.InfoRequest + (*Retention)(nil), // 8: carbonapi_v2_pb.Retention + (*InfoResponse)(nil), // 9: carbonapi_v2_pb.InfoResponse + (*ServerInfoResponse)(nil), // 10: carbonapi_v2_pb.ServerInfoResponse + (*ZipperInfoResponse)(nil), // 11: carbonapi_v2_pb.ZipperInfoResponse + (*ListMetricsResponse)(nil), // 12: carbonapi_v2_pb.ListMetricsResponse + (*MetricResponse)(nil), // 13: carbonapi_v2_pb.MetricResponse + (*MetricDetails)(nil), // 14: carbonapi_v2_pb.MetricDetails + (*MetricDetailsResponse)(nil), // 15: carbonapi_v2_pb.MetricDetailsResponse + (*ProtocolVersionResponse)(nil), // 16: carbonapi_v2_pb.ProtocolVersionResponse + nil, // 17: carbonapi_v2_pb.MetricDetailsResponse.MetricsEntry } var file_carbonapi_v2_pb_proto_depIdxs = []int32{ - 0, // 0: carbonapi_v2_pb.MultiFetchResponse.metrics:type_name -> carbonapi_v2_pb.FetchResponse - 2, // 1: carbonapi_v2_pb.GlobResponse.matches:type_name -> carbonapi_v2_pb.GlobMatch - 4, // 2: carbonapi_v2_pb.InfoResponse.retentions:type_name -> carbonapi_v2_pb.Retention - 5, // 3: carbonapi_v2_pb.ServerInfoResponse.info:type_name -> carbonapi_v2_pb.InfoResponse - 6, // 4: carbonapi_v2_pb.ZipperInfoResponse.responses:type_name -> carbonapi_v2_pb.ServerInfoResponse - 11, // 5: carbonapi_v2_pb.MetricDetailsResponse.metrics:type_name -> carbonapi_v2_pb.MetricDetailsResponse.MetricsEntry - 9, // 6: carbonapi_v2_pb.MetricDetailsResponse.MetricsEntry.value:type_name -> carbonapi_v2_pb.MetricDetails - 7, // [7:7] is the sub-list for method output_type - 7, // [7:7] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 0, // 0: carbonapi_v2_pb.MultiFetchRequest.metrics:type_name -> carbonapi_v2_pb.FetchRequest + 2, // 1: carbonapi_v2_pb.MultiFetchResponse.metrics:type_name -> carbonapi_v2_pb.FetchResponse + 5, // 2: carbonapi_v2_pb.GlobResponse.matches:type_name -> carbonapi_v2_pb.GlobMatch + 8, // 3: carbonapi_v2_pb.InfoResponse.retentions:type_name -> carbonapi_v2_pb.Retention + 9, // 4: carbonapi_v2_pb.ServerInfoResponse.info:type_name -> carbonapi_v2_pb.InfoResponse + 10, // 5: carbonapi_v2_pb.ZipperInfoResponse.responses:type_name -> carbonapi_v2_pb.ServerInfoResponse + 17, // 6: carbonapi_v2_pb.MetricDetailsResponse.metrics:type_name -> carbonapi_v2_pb.MetricDetailsResponse.MetricsEntry + 14, // 7: 
carbonapi_v2_pb.MetricDetailsResponse.MetricsEntry.value:type_name -> carbonapi_v2_pb.MetricDetails + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name } func init() { file_carbonapi_v2_pb_proto_init() } @@ -826,7 +1152,7 @@ func file_carbonapi_v2_pb_proto_init() { } if !protoimpl.UnsafeEnabled { file_carbonapi_v2_pb_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FetchResponse); i { + switch v := v.(*FetchRequest); i { case 0: return &v.state case 1: @@ -838,7 +1164,7 @@ func file_carbonapi_v2_pb_proto_init() { } } file_carbonapi_v2_pb_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MultiFetchResponse); i { + switch v := v.(*MultiFetchRequest); i { case 0: return &v.state case 1: @@ -850,7 +1176,7 @@ func file_carbonapi_v2_pb_proto_init() { } } file_carbonapi_v2_pb_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobMatch); i { + switch v := v.(*FetchResponse); i { case 0: return &v.state case 1: @@ -862,7 +1188,7 @@ func file_carbonapi_v2_pb_proto_init() { } } file_carbonapi_v2_pb_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobResponse); i { + switch v := v.(*MultiFetchResponse); i { case 0: return &v.state case 1: @@ -874,7 +1200,7 @@ func file_carbonapi_v2_pb_proto_init() { } } file_carbonapi_v2_pb_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Retention); i { + switch v := v.(*GlobRequest); i { case 0: return &v.state case 1: @@ -886,7 +1212,7 @@ func file_carbonapi_v2_pb_proto_init() { } } file_carbonapi_v2_pb_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InfoResponse); i { + switch v := v.(*GlobMatch); i { case 0: return &v.state case 1: @@ -898,7 +1224,7 @@ func file_carbonapi_v2_pb_proto_init() { } } file_carbonapi_v2_pb_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerInfoResponse); i { + switch v := v.(*GlobResponse); i { case 0: return &v.state case 1: @@ -910,7 +1236,7 @@ func file_carbonapi_v2_pb_proto_init() { } } file_carbonapi_v2_pb_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ZipperInfoResponse); i { + switch v := v.(*InfoRequest); i { case 0: return &v.state case 1: @@ -922,7 +1248,7 @@ func file_carbonapi_v2_pb_proto_init() { } } file_carbonapi_v2_pb_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListMetricsResponse); i { + switch v := v.(*Retention); i { case 0: return &v.state case 1: @@ -934,7 +1260,7 @@ func file_carbonapi_v2_pb_proto_init() { } } file_carbonapi_v2_pb_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetricDetails); i { + switch v := v.(*InfoResponse); i { case 0: return &v.state case 1: @@ -946,6 +1272,66 @@ func file_carbonapi_v2_pb_proto_init() { } } file_carbonapi_v2_pb_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_carbonapi_v2_pb_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*ZipperInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_carbonapi_v2_pb_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMetricsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_carbonapi_v2_pb_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_carbonapi_v2_pb_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_carbonapi_v2_pb_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MetricDetailsResponse); i { case 0: return &v.state @@ -957,6 +1343,18 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } + file_carbonapi_v2_pb_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProtocolVersionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -964,7 +1362,7 @@ func file_carbonapi_v2_pb_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_carbonapi_v2_pb_proto_rawDesc, NumEnums: 0, - NumMessages: 12, + NumMessages: 18, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.proto b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.proto index 2bc77d4b5..cac4b6431 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.proto +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.proto @@ -3,27 +3,45 @@ package carbonapi_v2_pb; option go_package = "github.com/go-graphite/protocol/carbonapi_v2_pb"; +message FetchRequest { + string name = 1; + int32 startTime = 2; + int32 stopTime = 3; +} + +message MultiFetchRequest { + repeated FetchRequest metrics = 1; +} + message FetchResponse { - string name = 1; - int32 startTime = 2; - int32 stopTime = 3; - int32 stepTime = 4; - repeated double values = 5; - repeated bool isAbsent = 6; + string name = 1; + int32 startTime = 2; + int32 stopTime = 3; + int32 stepTime = 4; + repeated double values = 5; + repeated bool isAbsent = 6; } message MultiFetchResponse { - repeated FetchResponse metrics = 1; + repeated FetchResponse metrics = 1; +} + +message GlobRequest { + string query = 1; } message GlobMatch { - string path = 1; - bool isLeaf = 2; + string path = 1; + bool isLeaf = 2; } message GlobResponse { - string name = 1; - repeated GlobMatch matches = 2; + string name = 1; + repeated GlobMatch matches = 2; +} + +message InfoRequest { + string name = 1; } message Retention { @@ -52,6 +70,10 @@ message ListMetricsResponse { repeated string Metrics = 1; } +message MetricResponse { + string name = 1; +} + // MetricDetails and MetricDetailsResponse is not guaranteed to stay the same in future releases // But we'll do our best to make them stable and only to extend them if that's necessary message MetricDetails { @@ -66,3 +88,7 @@ message MetricDetailsResponse { 
uint64 FreeSpace = 2; uint64 TotalSpace = 3; } + +message ProtocolVersionResponse { + string version = 1; +} \ No newline at end of file diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb_vtproto.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb_vtproto.pb.go index f93ff1c37..feed513cf 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb_vtproto.pb.go +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb_vtproto.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.0.0-20210524170403-d462593d1bfb +// protoc-gen-go-vtproto version: v0.3.0 // source: carbonapi_v2_pb.proto package carbonapi_v2_pb @@ -20,6 +20,410 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (this *FetchRequest) EqualVT(that *FetchRequest) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Name != that.Name { + return false + } + if this.StartTime != that.StartTime { + return false + } + if this.StopTime != that.StopTime { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MultiFetchRequest) EqualVT(that *MultiFetchRequest) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Metrics) != len(that.Metrics) { + return false + } + for i := range this.Metrics { + if !this.Metrics[i].EqualVT(that.Metrics[i]) { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *FetchResponse) EqualVT(that *FetchResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Name != that.Name { + return false + } + if this.StartTime != that.StartTime { + return false + } + if this.StopTime != that.StopTime { + return false + } + if this.StepTime != that.StepTime { + return false + } + if len(this.Values) != len(that.Values) { + return false + } + for i := range this.Values { + if this.Values[i] != that.Values[i] { + return false + } + } + if len(this.IsAbsent) != len(that.IsAbsent) { + return false + } + for i := range this.IsAbsent { + if this.IsAbsent[i] != that.IsAbsent[i] { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MultiFetchResponse) EqualVT(that *MultiFetchResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Metrics) != len(that.Metrics) { + return false + } + for i := range this.Metrics { + if !this.Metrics[i].EqualVT(that.Metrics[i]) { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *GlobRequest) EqualVT(that *GlobRequest) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Query != that.Query { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *GlobMatch) EqualVT(that *GlobMatch) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == 
"" + } + if this.Path != that.Path { + return false + } + if this.IsLeaf != that.IsLeaf { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *GlobResponse) EqualVT(that *GlobResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Name != that.Name { + return false + } + if len(this.Matches) != len(that.Matches) { + return false + } + for i := range this.Matches { + if !this.Matches[i].EqualVT(that.Matches[i]) { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *InfoRequest) EqualVT(that *InfoRequest) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Name != that.Name { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *Retention) EqualVT(that *Retention) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.SecondsPerPoint != that.SecondsPerPoint { + return false + } + if this.NumberOfPoints != that.NumberOfPoints { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *InfoResponse) EqualVT(that *InfoResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Name != that.Name { + return false + } + if this.AggregationMethod != that.AggregationMethod { + return false + } + if this.MaxRetention != that.MaxRetention { + return false + } + if this.XFilesFactor != that.XFilesFactor { + return false + } + if len(this.Retentions) != len(that.Retentions) { + return false + } + for i := range this.Retentions { + if !this.Retentions[i].EqualVT(that.Retentions[i]) { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *ServerInfoResponse) EqualVT(that *ServerInfoResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Server != that.Server { + return false + } + if !this.Info.EqualVT(that.Info) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *ZipperInfoResponse) EqualVT(that *ZipperInfoResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Responses) != len(that.Responses) { + return false + } + for i := range this.Responses { + if !this.Responses[i].EqualVT(that.Responses[i]) { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *ListMetricsResponse) EqualVT(that *ListMetricsResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Metrics) != len(that.Metrics) { + return false + } + for i := range this.Metrics { + if this.Metrics[i] != that.Metrics[i] { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MetricResponse) EqualVT(that *MetricResponse) bool { + if this == nil { + return that == nil || 
fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Name != that.Name { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MetricDetails) EqualVT(that *MetricDetails) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Size != that.Size { + return false + } + if this.ModTime != that.ModTime { + return false + } + if this.ATime != that.ATime { + return false + } + if this.RdTime != that.RdTime { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MetricDetailsResponse) EqualVT(that *MetricDetailsResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Metrics) != len(that.Metrics) { + return false + } + for i := range this.Metrics { + if !this.Metrics[i].EqualVT(that.Metrics[i]) { + return false + } + } + if this.FreeSpace != that.FreeSpace { + return false + } + if this.TotalSpace != that.TotalSpace { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *ProtocolVersionResponse) EqualVT(that *ProtocolVersionResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Version != that.Version { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (m *FetchRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FetchRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *FetchRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.StopTime != 0 { + i = encodeVarint(dAtA, i, uint64(m.StopTime)) + i-- + dAtA[i] = 0x18 + } + if m.StartTime != 0 { + i = encodeVarint(dAtA, i, uint64(m.StartTime)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MultiFetchRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MultiFetchRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MultiFetchRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Metrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *FetchResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -130,14 +534,12 @@ func (m *MultiFetchResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { } if len(m.Metrics) > 0 { for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := m.Metrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } @@ -145,6 +547,46 @@ func (m *MultiFetchResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *GlobRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GlobRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GlobRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *GlobMatch) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -227,14 +669,12 @@ func (m *GlobResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { } if len(m.Matches) > 0 { for iNdEx := len(m.Matches) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Matches[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := m.Matches[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x12 } @@ -249,7 +689,7 @@ func (m *GlobResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Retention) MarshalVT() (dAtA []byte, err error) { +func (m *InfoRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -262,12 +702,12 @@ func (m *Retention) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Retention) MarshalToVT(dAtA []byte) (int, error) { +func (m *InfoRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *Retention) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *InfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -279,20 +719,17 @@ func (m *Retention) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.NumberOfPoints != 0 { - i = encodeVarint(dAtA, i, uint64(m.NumberOfPoints)) - i-- - dAtA[i] = 0x10 - } - if m.SecondsPerPoint != 0 { - i = encodeVarint(dAtA, i, uint64(m.SecondsPerPoint)) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } 
-func (m *InfoResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Retention) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -305,7 +742,50 @@ func (m *InfoResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InfoResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Retention) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Retention) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.NumberOfPoints != 0 { + i = encodeVarint(dAtA, i, uint64(m.NumberOfPoints)) + i-- + dAtA[i] = 0x10 + } + if m.SecondsPerPoint != 0 { + i = encodeVarint(dAtA, i, uint64(m.SecondsPerPoint)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *InfoResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } @@ -324,14 +804,12 @@ func (m *InfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { } if len(m.Retentions) > 0 { for iNdEx := len(m.Retentions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Retentions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := m.Retentions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x2a } @@ -395,14 +873,12 @@ func (m *ServerInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { copy(dAtA[i:], m.unknownFields) } if m.Info != nil { - { - size, err := m.Info.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := m.Info.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x12 } @@ -448,14 +924,12 @@ func (m *ZipperInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { } if len(m.Responses) > 0 { for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Responses[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := m.Responses[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } @@ -505,6 +979,46 @@ func (m *ListMetricsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *MetricResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MetricResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) 
+ _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *MetricDetails) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -602,14 +1116,12 @@ func (m *MetricDetailsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) for k := range m.Metrics { v := m.Metrics[k] baseI := i - { - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x12 i -= len(k) @@ -625,6 +1137,46 @@ func (m *MetricDetailsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *ProtocolVersionResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProtocolVersionResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ProtocolVersionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarint(dAtA []byte, offset int, v uint64) int { offset -= sov(v) base := offset @@ -636,6 +1188,46 @@ func encodeVarint(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *FetchRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.StartTime != 0 { + n += 1 + sov(uint64(m.StartTime)) + } + if m.StopTime != 0 { + n += 1 + sov(uint64(m.StopTime)) + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + +func (m *MultiFetchRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Metrics) > 0 { + for _, e := range m.Metrics { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + func (m *FetchResponse) SizeVT() (n int) { if m == nil { return 0 @@ -685,6 +1277,22 @@ func (m *MultiFetchResponse) SizeVT() (n int) { return n } +func (m *GlobRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + func (m *GlobMatch) SizeVT() (n int) { if m == nil { return 0 @@ -726,6 +1334,22 @@ func (m *GlobResponse) SizeVT() (n int) { return n } +func (m *InfoRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + func (m *Retention) SizeVT() (n int) { if m == nil { 
return 0 @@ -832,6 +1456,22 @@ func (m *ListMetricsResponse) SizeVT() (n int) { return n } +func (m *MetricResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + func (m *MetricDetails) SizeVT() (n int) { if m == nil { return 0 @@ -887,13 +1527,29 @@ func (m *MetricDetailsResponse) SizeVT() (n int) { return n } +func (m *ProtocolVersionResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + func sov(x uint64) (n int) { return (bits.Len64(x|1) + 6) / 7 } func soz(x uint64) (n int) { return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *FetchResponse) UnmarshalVT(dAtA []byte) error { +func (m *FetchRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -916,10 +1572,10 @@ func (m *FetchResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FetchResponse: wiretype end group for non-group") + return fmt.Errorf("proto: FetchRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FetchResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FetchRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -992,24 +1648,230 @@ func (m *FetchResponse) UnmarshalVT(dAtA []byte) error { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StepTime", wireType) - } - m.StepTime = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StepTime |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MultiFetchRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MultiFetchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MultiFetchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, &FetchRequest{}) + if err := m.Metrics[len(m.Metrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FetchResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FetchResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FetchResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + m.StartTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTime |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopTime", wireType) + } + m.StopTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StopTime |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StepTime", wireType) + } + m.StepTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StepTime |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } case 5: if wireType == 1 { @@ -1157,7 +2019,92 @@ func (m *FetchResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *MultiFetchResponse) UnmarshalVT(dAtA []byte) error { +func (m *MultiFetchResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MultiFetchResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MultiFetchResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, &FetchResponse{}) + if err := m.Metrics[len(m.Metrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GlobRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1180,17 +2127,17 @@ func (m *MultiFetchResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MultiFetchResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GlobRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MultiFetchResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GlobRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -1200,25 +2147,23 @@ func (m *MultiFetchResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Metrics = append(m.Metrics, &FetchResponse{}) - if err := m.Metrics[len(m.Metrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Query = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1462,6 +2407,89 @@ func (m *GlobResponse) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *InfoRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Retention) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2017,6 +3045,89 @@ func (m *ListMetricsResponse) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *MetricResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *MetricDetails) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2362,6 +3473,89 @@ func (m *MetricDetailsResponse) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *ProtocolVersionResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProtocolVersionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProtocolVersionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skip(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v3_pb/carbonapi_v3_pb.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v3_pb/carbonapi_v3_pb.pb.go index 0a86f13b8..954d08678 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v3_pb/carbonapi_v3_pb.pb.go +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v3_pb/carbonapi_v3_pb.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0-devel -// protoc v3.12.4 -// source: carbonapi_v3_pb.proto +// protoc-gen-go v1.28.0 +// protoc v3.19.4 +// source: carbonapi_v3_pb/carbonapi_v3_pb.proto package carbonapi_v3_pb @@ -32,7 +32,7 @@ type FilteringFunction struct { func (x *FilteringFunction) Reset() { *x = FilteringFunction{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[0] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -45,7 +45,7 @@ func (x *FilteringFunction) String() string { func (*FilteringFunction) ProtoMessage() {} func (x *FilteringFunction) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[0] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -58,7 +58,7 @@ func (x *FilteringFunction) ProtoReflect() protoreflect.Message { // Deprecated: Use FilteringFunction.ProtoReflect.Descriptor instead. func (*FilteringFunction) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{0} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{0} } func (x *FilteringFunction) GetName() string { @@ -85,7 +85,7 @@ type CapabilityRequest struct { func (x *CapabilityRequest) Reset() { *x = CapabilityRequest{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[1] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -98,7 +98,7 @@ func (x *CapabilityRequest) String() string { func (*CapabilityRequest) ProtoMessage() {} func (x *CapabilityRequest) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[1] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -111,7 +111,7 @@ func (x *CapabilityRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CapabilityRequest.ProtoReflect.Descriptor instead. func (*CapabilityRequest) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{1} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{1} } // Storage capability information @@ -134,7 +134,7 @@ type CapabilityResponse struct { func (x *CapabilityResponse) Reset() { *x = CapabilityResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[2] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -147,7 +147,7 @@ func (x *CapabilityResponse) String() string { func (*CapabilityResponse) ProtoMessage() {} func (x *CapabilityResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[2] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -160,7 +160,7 @@ func (x *CapabilityResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CapabilityResponse.ProtoReflect.Descriptor instead. 
func (*CapabilityResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{2} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{2} } func (x *CapabilityResponse) GetSupportedProtocols() []string { @@ -224,7 +224,7 @@ type FetchRequest struct { func (x *FetchRequest) Reset() { *x = FetchRequest{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[3] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -237,7 +237,7 @@ func (x *FetchRequest) String() string { func (*FetchRequest) ProtoMessage() {} func (x *FetchRequest) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[3] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -250,7 +250,7 @@ func (x *FetchRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FetchRequest.ProtoReflect.Descriptor instead. func (*FetchRequest) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{3} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{3} } func (x *FetchRequest) GetName() string { @@ -313,7 +313,7 @@ type MultiFetchRequest struct { func (x *MultiFetchRequest) Reset() { *x = MultiFetchRequest{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[4] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -326,7 +326,7 @@ func (x *MultiFetchRequest) String() string { func (*MultiFetchRequest) ProtoMessage() {} func (x *MultiFetchRequest) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[4] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -339,7 +339,7 @@ func (x *MultiFetchRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MultiFetchRequest.ProtoReflect.Descriptor instead. func (*MultiFetchRequest) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{4} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{4} } func (x *MultiFetchRequest) GetMetrics() []*FetchRequest { @@ -373,7 +373,7 @@ type FetchResponse struct { func (x *FetchResponse) Reset() { *x = FetchResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[5] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -386,7 +386,7 @@ func (x *FetchResponse) String() string { func (*FetchResponse) ProtoMessage() {} func (x *FetchResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[5] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -399,7 +399,7 @@ func (x *FetchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FetchResponse.ProtoReflect.Descriptor instead. 
func (*FetchResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{5} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{5} } func (x *FetchResponse) GetName() string { @@ -497,7 +497,7 @@ type MultiFetchResponse struct { func (x *MultiFetchResponse) Reset() { *x = MultiFetchResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[6] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -510,7 +510,7 @@ func (x *MultiFetchResponse) String() string { func (*MultiFetchResponse) ProtoMessage() {} func (x *MultiFetchResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[6] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -523,7 +523,7 @@ func (x *MultiFetchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MultiFetchResponse.ProtoReflect.Descriptor instead. func (*MultiFetchResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{6} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{6} } func (x *MultiFetchResponse) GetMetrics() []*FetchResponse { @@ -547,7 +547,7 @@ type MultiGlobRequest struct { func (x *MultiGlobRequest) Reset() { *x = MultiGlobRequest{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[7] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -560,7 +560,7 @@ func (x *MultiGlobRequest) String() string { func (*MultiGlobRequest) ProtoMessage() {} func (x *MultiGlobRequest) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[7] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -573,7 +573,7 @@ func (x *MultiGlobRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MultiGlobRequest.ProtoReflect.Descriptor instead. func (*MultiGlobRequest) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{7} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{7} } func (x *MultiGlobRequest) GetMetrics() []string { @@ -609,7 +609,7 @@ type GlobMatch struct { func (x *GlobMatch) Reset() { *x = GlobMatch{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[8] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -622,7 +622,7 @@ func (x *GlobMatch) String() string { func (*GlobMatch) ProtoMessage() {} func (x *GlobMatch) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[8] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -635,7 +635,7 @@ func (x *GlobMatch) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobMatch.ProtoReflect.Descriptor instead. 
func (*GlobMatch) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{8} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{8} } func (x *GlobMatch) GetPath() string { @@ -665,7 +665,7 @@ type GlobResponse struct { func (x *GlobResponse) Reset() { *x = GlobResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[9] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -678,7 +678,7 @@ func (x *GlobResponse) String() string { func (*GlobResponse) ProtoMessage() {} func (x *GlobResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[9] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -691,7 +691,7 @@ func (x *GlobResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobResponse.ProtoReflect.Descriptor instead. func (*GlobResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{9} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{9} } func (x *GlobResponse) GetName() string { @@ -719,7 +719,7 @@ type MultiGlobResponse struct { func (x *MultiGlobResponse) Reset() { *x = MultiGlobResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[10] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -732,7 +732,7 @@ func (x *MultiGlobResponse) String() string { func (*MultiGlobResponse) ProtoMessage() {} func (x *MultiGlobResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[10] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -745,7 +745,7 @@ func (x *MultiGlobResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MultiGlobResponse.ProtoReflect.Descriptor instead. func (*MultiGlobResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{10} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{10} } func (x *MultiGlobResponse) GetMetrics() []*GlobResponse { @@ -767,7 +767,7 @@ type MetricsInfoRequest struct { func (x *MetricsInfoRequest) Reset() { *x = MetricsInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[11] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -780,7 +780,7 @@ func (x *MetricsInfoRequest) String() string { func (*MetricsInfoRequest) ProtoMessage() {} func (x *MetricsInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[11] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -793,7 +793,7 @@ func (x *MetricsInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MetricsInfoRequest.ProtoReflect.Descriptor instead. 
func (*MetricsInfoRequest) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{11} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{11} } func (x *MetricsInfoRequest) GetName() string { @@ -814,7 +814,7 @@ type MultiMetricsInfoRequest struct { func (x *MultiMetricsInfoRequest) Reset() { *x = MultiMetricsInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[12] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -827,7 +827,7 @@ func (x *MultiMetricsInfoRequest) String() string { func (*MultiMetricsInfoRequest) ProtoMessage() {} func (x *MultiMetricsInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[12] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -840,7 +840,7 @@ func (x *MultiMetricsInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MultiMetricsInfoRequest.ProtoReflect.Descriptor instead. func (*MultiMetricsInfoRequest) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{12} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{12} } func (x *MultiMetricsInfoRequest) GetNames() []string { @@ -862,7 +862,7 @@ type Retention struct { func (x *Retention) Reset() { *x = Retention{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[13] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -875,7 +875,7 @@ func (x *Retention) String() string { func (*Retention) ProtoMessage() {} func (x *Retention) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[13] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -888,7 +888,7 @@ func (x *Retention) ProtoReflect() protoreflect.Message { // Deprecated: Use Retention.ProtoReflect.Descriptor instead. func (*Retention) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{13} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{13} } func (x *Retention) GetSecondsPerPoint() int64 { @@ -920,7 +920,7 @@ type MetricsInfoResponse struct { func (x *MetricsInfoResponse) Reset() { *x = MetricsInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[14] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -933,7 +933,7 @@ func (x *MetricsInfoResponse) String() string { func (*MetricsInfoResponse) ProtoMessage() {} func (x *MetricsInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[14] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -946,7 +946,7 @@ func (x *MetricsInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MetricsInfoResponse.ProtoReflect.Descriptor instead. 
func (*MetricsInfoResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{14} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{14} } func (x *MetricsInfoResponse) GetName() string { @@ -995,7 +995,7 @@ type MultiMetricsInfoResponse struct { func (x *MultiMetricsInfoResponse) Reset() { *x = MultiMetricsInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[15] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1008,7 +1008,7 @@ func (x *MultiMetricsInfoResponse) String() string { func (*MultiMetricsInfoResponse) ProtoMessage() {} func (x *MultiMetricsInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[15] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1021,7 +1021,7 @@ func (x *MultiMetricsInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MultiMetricsInfoResponse.ProtoReflect.Descriptor instead. func (*MultiMetricsInfoResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{15} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{15} } func (x *MultiMetricsInfoResponse) GetMetrics() []*MetricsInfoResponse { @@ -1043,7 +1043,7 @@ type ZipperInfoResponse struct { func (x *ZipperInfoResponse) Reset() { *x = ZipperInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[16] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1056,7 +1056,7 @@ func (x *ZipperInfoResponse) String() string { func (*ZipperInfoResponse) ProtoMessage() {} func (x *ZipperInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[16] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1069,7 +1069,7 @@ func (x *ZipperInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ZipperInfoResponse.ProtoReflect.Descriptor instead. 
func (*ZipperInfoResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{16} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{16} } func (x *ZipperInfoResponse) GetInfo() map[string]*MultiMetricsInfoResponse { @@ -1091,7 +1091,7 @@ type ListMetricsResponse struct { func (x *ListMetricsResponse) Reset() { *x = ListMetricsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[17] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1104,7 +1104,7 @@ func (x *ListMetricsResponse) String() string { func (*ListMetricsResponse) ProtoMessage() {} func (x *ListMetricsResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[17] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1117,7 +1117,7 @@ func (x *ListMetricsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListMetricsResponse.ProtoReflect.Descriptor instead. func (*ListMetricsResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{17} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{17} } func (x *ListMetricsResponse) GetMetrics() []string { @@ -1143,7 +1143,7 @@ type MetricDetails struct { func (x *MetricDetails) Reset() { *x = MetricDetails{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[18] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1156,7 +1156,7 @@ func (x *MetricDetails) String() string { func (*MetricDetails) ProtoMessage() {} func (x *MetricDetails) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[18] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1169,7 +1169,7 @@ func (x *MetricDetails) ProtoReflect() protoreflect.Message { // Deprecated: Use MetricDetails.ProtoReflect.Descriptor instead. func (*MetricDetails) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{18} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{18} } func (x *MetricDetails) GetSize() int64 { @@ -1220,7 +1220,7 @@ type MetricDetailsResponse struct { func (x *MetricDetailsResponse) Reset() { *x = MetricDetailsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[19] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1233,7 +1233,7 @@ func (x *MetricDetailsResponse) String() string { func (*MetricDetailsResponse) ProtoMessage() {} func (x *MetricDetailsResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[19] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1246,7 +1246,7 @@ func (x *MetricDetailsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MetricDetailsResponse.ProtoReflect.Descriptor instead. 
func (*MetricDetailsResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{19} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{19} } func (x *MetricDetailsResponse) GetMetrics() map[string]*MetricDetails { @@ -1281,7 +1281,7 @@ type MultiDetailsResponse struct { func (x *MultiDetailsResponse) Reset() { *x = MultiDetailsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v3_pb_proto_msgTypes[20] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1294,7 +1294,7 @@ func (x *MultiDetailsResponse) String() string { func (*MultiDetailsResponse) ProtoMessage() {} func (x *MultiDetailsResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v3_pb_proto_msgTypes[20] + mi := &file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1307,7 +1307,7 @@ func (x *MultiDetailsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MultiDetailsResponse.ProtoReflect.Descriptor instead. func (*MultiDetailsResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v3_pb_proto_rawDescGZIP(), []int{20} + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP(), []int{20} } func (x *MultiDetailsResponse) GetMetrics() map[string]*MetricDetailsResponse { @@ -1317,10 +1317,11 @@ func (x *MultiDetailsResponse) GetMetrics() map[string]*MetricDetailsResponse { return nil } -var File_carbonapi_v3_pb_proto protoreflect.FileDescriptor +var File_carbonapi_v3_pb_carbonapi_v3_pb_proto protoreflect.FileDescriptor -var file_carbonapi_v3_pb_proto_rawDesc = []byte{ - 0x0a, 0x15, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x5f, 0x70, +var file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x5f, 0x70, + 0x62, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x5f, 0x70, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x5f, 0x70, 0x62, 0x22, 0x45, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, @@ -1517,19 +1518,19 @@ var file_carbonapi_v3_pb_proto_rawDesc = []byte{ } var ( - file_carbonapi_v3_pb_proto_rawDescOnce sync.Once - file_carbonapi_v3_pb_proto_rawDescData = file_carbonapi_v3_pb_proto_rawDesc + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescOnce sync.Once + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescData = file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDesc ) -func file_carbonapi_v3_pb_proto_rawDescGZIP() []byte { - file_carbonapi_v3_pb_proto_rawDescOnce.Do(func() { - file_carbonapi_v3_pb_proto_rawDescData = protoimpl.X.CompressGZIP(file_carbonapi_v3_pb_proto_rawDescData) +func file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescGZIP() []byte { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescOnce.Do(func() { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescData = protoimpl.X.CompressGZIP(file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescData) }) - return file_carbonapi_v3_pb_proto_rawDescData + return file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDescData } -var file_carbonapi_v3_pb_proto_msgTypes = make([]protoimpl.MessageInfo, 24) -var 
file_carbonapi_v3_pb_proto_goTypes = []interface{}{ +var file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes = make([]protoimpl.MessageInfo, 24) +var file_carbonapi_v3_pb_carbonapi_v3_pb_proto_goTypes = []interface{}{ (*FilteringFunction)(nil), // 0: carbonapi_v3_pb.FilteringFunction (*CapabilityRequest)(nil), // 1: carbonapi_v3_pb.CapabilityRequest (*CapabilityResponse)(nil), // 2: carbonapi_v3_pb.CapabilityResponse @@ -1555,7 +1556,7 @@ var file_carbonapi_v3_pb_proto_goTypes = []interface{}{ nil, // 22: carbonapi_v3_pb.MetricDetailsResponse.MetricsEntry nil, // 23: carbonapi_v3_pb.MultiDetailsResponse.MetricsEntry } -var file_carbonapi_v3_pb_proto_depIdxs = []int32{ +var file_carbonapi_v3_pb_carbonapi_v3_pb_proto_depIdxs = []int32{ 0, // 0: carbonapi_v3_pb.FetchRequest.filterFunctions:type_name -> carbonapi_v3_pb.FilteringFunction 3, // 1: carbonapi_v3_pb.MultiFetchRequest.metrics:type_name -> carbonapi_v3_pb.FetchRequest 5, // 2: carbonapi_v3_pb.MultiFetchResponse.metrics:type_name -> carbonapi_v3_pb.FetchResponse @@ -1576,13 +1577,13 @@ var file_carbonapi_v3_pb_proto_depIdxs = []int32{ 0, // [0:13] is the sub-list for field type_name } -func init() { file_carbonapi_v3_pb_proto_init() } -func file_carbonapi_v3_pb_proto_init() { - if File_carbonapi_v3_pb_proto != nil { +func init() { file_carbonapi_v3_pb_carbonapi_v3_pb_proto_init() } +func file_carbonapi_v3_pb_carbonapi_v3_pb_proto_init() { + if File_carbonapi_v3_pb_carbonapi_v3_pb_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_carbonapi_v3_pb_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FilteringFunction); i { case 0: return &v.state @@ -1594,7 +1595,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CapabilityRequest); i { case 0: return &v.state @@ -1606,7 +1607,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CapabilityResponse); i { case 0: return &v.state @@ -1618,7 +1619,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FetchRequest); i { case 0: return &v.state @@ -1630,7 +1631,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MultiFetchRequest); i { case 0: return &v.state @@ -1642,7 +1643,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FetchResponse); i { case 0: return &v.state @@ -1654,7 +1655,7 @@ func file_carbonapi_v3_pb_proto_init() { 
return nil } } - file_carbonapi_v3_pb_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MultiFetchResponse); i { case 0: return &v.state @@ -1666,7 +1667,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MultiGlobRequest); i { case 0: return &v.state @@ -1678,7 +1679,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GlobMatch); i { case 0: return &v.state @@ -1690,7 +1691,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GlobResponse); i { case 0: return &v.state @@ -1702,7 +1703,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MultiGlobResponse); i { case 0: return &v.state @@ -1714,7 +1715,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MetricsInfoRequest); i { case 0: return &v.state @@ -1726,7 +1727,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MultiMetricsInfoRequest); i { case 0: return &v.state @@ -1738,7 +1739,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Retention); i { case 0: return &v.state @@ -1750,7 +1751,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MetricsInfoResponse); i { case 0: return &v.state @@ -1762,7 +1763,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MultiMetricsInfoResponse); i { case 0: return &v.state @@ -1774,7 +1775,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} 
{ + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ZipperInfoResponse); i { case 0: return &v.state @@ -1786,7 +1787,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListMetricsResponse); i { case 0: return &v.state @@ -1798,7 +1799,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MetricDetails); i { case 0: return &v.state @@ -1810,7 +1811,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MetricDetailsResponse); i { case 0: return &v.state @@ -1822,7 +1823,7 @@ func file_carbonapi_v3_pb_proto_init() { return nil } } - file_carbonapi_v3_pb_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MultiDetailsResponse); i { case 0: return &v.state @@ -1839,18 +1840,18 @@ func file_carbonapi_v3_pb_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_carbonapi_v3_pb_proto_rawDesc, + RawDescriptor: file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDesc, NumEnums: 0, NumMessages: 24, NumExtensions: 0, NumServices: 0, }, - GoTypes: file_carbonapi_v3_pb_proto_goTypes, - DependencyIndexes: file_carbonapi_v3_pb_proto_depIdxs, - MessageInfos: file_carbonapi_v3_pb_proto_msgTypes, + GoTypes: file_carbonapi_v3_pb_carbonapi_v3_pb_proto_goTypes, + DependencyIndexes: file_carbonapi_v3_pb_carbonapi_v3_pb_proto_depIdxs, + MessageInfos: file_carbonapi_v3_pb_carbonapi_v3_pb_proto_msgTypes, }.Build() - File_carbonapi_v3_pb_proto = out.File - file_carbonapi_v3_pb_proto_rawDesc = nil - file_carbonapi_v3_pb_proto_goTypes = nil - file_carbonapi_v3_pb_proto_depIdxs = nil + File_carbonapi_v3_pb_carbonapi_v3_pb_proto = out.File + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_rawDesc = nil + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_goTypes = nil + file_carbonapi_v3_pb_carbonapi_v3_pb_proto_depIdxs = nil } diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v3_pb/carbonapi_v3_pb_vtproto.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v3_pb/carbonapi_v3_pb_vtproto.pb.go index 56a315fad..0a85858cc 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v3_pb/carbonapi_v3_pb_vtproto.pb.go +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v3_pb/carbonapi_v3_pb_vtproto.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
-// protoc-gen-go-vtproto version: v0.0.0-20210524170403-d462593d1bfb -// source: carbonapi_v3_pb.proto +// protoc-gen-go-vtproto version: v0.3.0 +// source: carbonapi_v3_pb/carbonapi_v3_pb.proto package carbonapi_v3_pb @@ -20,6 +20,454 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (this *FilteringFunction) EqualVT(that *FilteringFunction) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Name != that.Name { + return false + } + if len(this.Arguments) != len(that.Arguments) { + return false + } + for i := range this.Arguments { + if this.Arguments[i] != that.Arguments[i] { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *CapabilityRequest) EqualVT(that *CapabilityRequest) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *CapabilityResponse) EqualVT(that *CapabilityResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.SupportedProtocols) != len(that.SupportedProtocols) { + return false + } + for i := range this.SupportedProtocols { + if this.SupportedProtocols[i] != that.SupportedProtocols[i] { + return false + } + } + if this.Name != that.Name { + return false + } + if this.HighPrecisionTimestamps != that.HighPrecisionTimestamps { + return false + } + if this.SupportFilteringFunctions != that.SupportFilteringFunctions { + return false + } + if this.LikeSplittedRequests != that.LikeSplittedRequests { + return false + } + if this.SupportStreaming != that.SupportStreaming { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *FetchRequest) EqualVT(that *FetchRequest) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Name != that.Name { + return false + } + if this.StartTime != that.StartTime { + return false + } + if this.StopTime != that.StopTime { + return false + } + if this.HighPrecisionTimestamps != that.HighPrecisionTimestamps { + return false + } + if this.PathExpression != that.PathExpression { + return false + } + if len(this.FilterFunctions) != len(that.FilterFunctions) { + return false + } + for i := range this.FilterFunctions { + if !this.FilterFunctions[i].EqualVT(that.FilterFunctions[i]) { + return false + } + } + if this.MaxDataPoints != that.MaxDataPoints { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MultiFetchRequest) EqualVT(that *MultiFetchRequest) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Metrics) != len(that.Metrics) { + return false + } + for i := range this.Metrics { + if !this.Metrics[i].EqualVT(that.Metrics[i]) { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *FetchResponse) EqualVT(that *FetchResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Name != that.Name { + return 
false + } + if this.PathExpression != that.PathExpression { + return false + } + if this.ConsolidationFunc != that.ConsolidationFunc { + return false + } + if this.StartTime != that.StartTime { + return false + } + if this.StopTime != that.StopTime { + return false + } + if this.StepTime != that.StepTime { + return false + } + if this.XFilesFactor != that.XFilesFactor { + return false + } + if this.HighPrecisionTimestamps != that.HighPrecisionTimestamps { + return false + } + if len(this.Values) != len(that.Values) { + return false + } + for i := range this.Values { + if this.Values[i] != that.Values[i] { + return false + } + } + if len(this.AppliedFunctions) != len(that.AppliedFunctions) { + return false + } + for i := range this.AppliedFunctions { + if this.AppliedFunctions[i] != that.AppliedFunctions[i] { + return false + } + } + if this.RequestStartTime != that.RequestStartTime { + return false + } + if this.RequestStopTime != that.RequestStopTime { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MultiFetchResponse) EqualVT(that *MultiFetchResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Metrics) != len(that.Metrics) { + return false + } + for i := range this.Metrics { + if !this.Metrics[i].EqualVT(that.Metrics[i]) { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MultiGlobRequest) EqualVT(that *MultiGlobRequest) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Metrics) != len(that.Metrics) { + return false + } + for i := range this.Metrics { + if this.Metrics[i] != that.Metrics[i] { + return false + } + } + if this.StartTime != that.StartTime { + return false + } + if this.StopTime != that.StopTime { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *GlobMatch) EqualVT(that *GlobMatch) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Path != that.Path { + return false + } + if this.IsLeaf != that.IsLeaf { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *GlobResponse) EqualVT(that *GlobResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Name != that.Name { + return false + } + if len(this.Matches) != len(that.Matches) { + return false + } + for i := range this.Matches { + if !this.Matches[i].EqualVT(that.Matches[i]) { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MultiGlobResponse) EqualVT(that *MultiGlobResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Metrics) != len(that.Metrics) { + return false + } + for i := range this.Metrics { + if !this.Metrics[i].EqualVT(that.Metrics[i]) { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MetricsInfoRequest) EqualVT(that *MetricsInfoRequest) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if 
that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Name != that.Name { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MultiMetricsInfoRequest) EqualVT(that *MultiMetricsInfoRequest) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Names) != len(that.Names) { + return false + } + for i := range this.Names { + if this.Names[i] != that.Names[i] { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *Retention) EqualVT(that *Retention) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.SecondsPerPoint != that.SecondsPerPoint { + return false + } + if this.NumberOfPoints != that.NumberOfPoints { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MetricsInfoResponse) EqualVT(that *MetricsInfoResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Name != that.Name { + return false + } + if this.ConsolidationFunc != that.ConsolidationFunc { + return false + } + if this.MaxRetention != that.MaxRetention { + return false + } + if this.XFilesFactor != that.XFilesFactor { + return false + } + if len(this.Retentions) != len(that.Retentions) { + return false + } + for i := range this.Retentions { + if !this.Retentions[i].EqualVT(that.Retentions[i]) { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MultiMetricsInfoResponse) EqualVT(that *MultiMetricsInfoResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Metrics) != len(that.Metrics) { + return false + } + for i := range this.Metrics { + if !this.Metrics[i].EqualVT(that.Metrics[i]) { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *ZipperInfoResponse) EqualVT(that *ZipperInfoResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Info) != len(that.Info) { + return false + } + for i := range this.Info { + if !this.Info[i].EqualVT(that.Info[i]) { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *ListMetricsResponse) EqualVT(that *ListMetricsResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Metrics) != len(that.Metrics) { + return false + } + for i := range this.Metrics { + if this.Metrics[i] != that.Metrics[i] { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MetricDetails) EqualVT(that *MetricDetails) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if this.Size != that.Size { + return false + } + if this.ModTime != that.ModTime { + return false + } + if this.ATime != that.ATime { + return false + } + if this.RdTime != that.RdTime { + return false + } + if this.RealSize 
!= that.RealSize { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MetricDetailsResponse) EqualVT(that *MetricDetailsResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Metrics) != len(that.Metrics) { + return false + } + for i := range this.Metrics { + if !this.Metrics[i].EqualVT(that.Metrics[i]) { + return false + } + } + if this.FreeSpace != that.FreeSpace { + return false + } + if this.TotalSpace != that.TotalSpace { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *MultiDetailsResponse) EqualVT(that *MultiDetailsResponse) bool { + if this == nil { + return that == nil || fmt.Sprintf("%v", that) == "" + } else if that == nil { + return fmt.Sprintf("%v", this) == "" + } + if len(this.Metrics) != len(that.Metrics) { + return false + } + for i := range this.Metrics { + if !this.Metrics[i].EqualVT(that.Metrics[i]) { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + func (m *FilteringFunction) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -228,14 +676,12 @@ func (m *FetchRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { } if len(m.FilterFunctions) > 0 { for iNdEx := len(m.FilterFunctions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.FilterFunctions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := m.FilterFunctions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x32 } @@ -309,14 +755,12 @@ func (m *MultiFetchRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { } if len(m.Metrics) > 0 { for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := m.Metrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } @@ -470,14 +914,12 @@ func (m *MultiFetchResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { } if len(m.Metrics) > 0 { for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := m.Metrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } @@ -619,14 +1061,12 @@ func (m *GlobResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { } if len(m.Matches) > 0 { for iNdEx := len(m.Matches) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Matches[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := m.Matches[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x12 } @@ -673,14 +1113,12 @@ func (m *MultiGlobResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { } if len(m.Metrics) > 0 { for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Metrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := m.Metrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } @@ -845,14 +1283,12 @@ func (m *MetricsInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { } if len(m.Retentions) > 0 { for iNdEx := len(m.Retentions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Retentions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := m.Retentions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x2a } @@ -917,14 +1353,12 @@ func (m *MultiMetricsInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err } if len(m.Metrics) > 0 { for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := m.Metrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } @@ -966,14 +1400,12 @@ func (m *ZipperInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { for k := range m.Info { v := m.Info[k] baseI := i - { - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x12 i -= len(k) @@ -1133,14 +1565,12 @@ func (m *MetricDetailsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) for k := range m.Metrics { v := m.Metrics[k] baseI := i - { - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x12 i -= len(k) @@ -1190,14 +1620,12 @@ func (m *MultiDetailsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) for k := range m.Metrics { v := m.Metrics[k] baseI := i - { - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x12 i -= len(k) diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go new file mode 100644 index 000000000..60e82caa9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -0,0 +1,524 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONUnmarshalV2 = false + +// UnmarshalNext unmarshals the next JSON object from d into m. +func UnmarshalNext(d *json.Decoder, m proto.Message) error { + return new(Unmarshaler).UnmarshalNext(d, m) +} + +// Unmarshal unmarshals a JSON object from r into m. +func Unmarshal(r io.Reader, m proto.Message) error { + return new(Unmarshaler).Unmarshal(r, m) +} + +// UnmarshalString unmarshals a JSON object from s into m. +func UnmarshalString(s string, m proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // AllowUnknownFields specifies whether to allow messages to contain + // unknown JSON fields, as opposed to failing to unmarshal. + AllowUnknownFields bool + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize the way +// they are unmarshaled from JSON. Messages that implement this should also +// implement JSONPBMarshaler so that the custom format can be produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Unmarshal unmarshals a JSON object from r into m. +func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { + return u.UnmarshalNext(json.NewDecoder(r), m) +} + +// UnmarshalNext unmarshals the next JSON object from d into m. +func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { + if m == nil { + return errors.New("invalid nil message") + } + + // Parse the next JSON object from the stream. + raw := json.RawMessage{} + if err := d.Decode(&raw); err != nil { + return err + } + + // Check for custom unmarshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsu, ok := m.(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, raw) + } + + mr := proto.MessageReflect(m) + + // NOTE: For historical reasons, a top-level null is treated as a noop. + // This is incorrect, but kept for compatibility. + if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { + return nil + } + + if wrapJSONUnmarshalV2 { + // NOTE: If input message is non-empty, we need to preserve merge semantics + // of the old jsonpb implementation. These semantics are not supported by + // the protobuf JSON specification. + isEmpty := true + mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { + isEmpty = false // at least one iteration implies non-empty + return false + }) + if !isEmpty { + // Perform unmarshaling into a newly allocated, empty message. + mr = mr.New() + + // Use a defer to copy all unmarshaled fields into the original message. 
+ dst := proto.MessageReflect(m) + defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + dst.Set(fd, v) + return true + }) + } + + // Unmarshal using the v2 JSON unmarshaler. + opts := protojson.UnmarshalOptions{ + DiscardUnknown: u.AllowUnknownFields, + } + if u.AnyResolver != nil { + opts.Resolver = anyResolver{u.AnyResolver} + } + return opts.Unmarshal(raw, mr.Interface()) + } else { + if err := u.unmarshalMessage(mr, raw); err != nil { + return err + } + return protoV2.CheckInitialized(mr.Interface()) + } +} + +func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { + md := m.Descriptor() + fds := md.Fields() + + if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, in) + } + + if string(in) == "null" && md.FullName() != "google.protobuf.Value" { + return nil + } + + switch wellKnownType(md.FullName()) { + case "Any": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + rawTypeURL, ok := jsonObject["@type"] + if !ok { + return errors.New("Any JSON doesn't have '@type'") + } + typeURL, err := unquoteString(string(rawTypeURL)) + if err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) + } + m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) + + var m2 protoreflect.Message + if u.AnyResolver != nil { + mi, err := u.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + if err == protoregistry.NotFound { + return fmt.Errorf("could not resolve Any message type: %v", typeURL) + } + return err + } + m2 = mt.New() + } + + if wellKnownType(m2.Descriptor().FullName()) != "" { + rawValue, ok := jsonObject["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + if err := u.unmarshalMessage(m2, rawValue); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } else { + delete(jsonObject, "@type") + rawJSON, err := json.Marshal(jsonObject) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + if err = u.unmarshalMessage(m2, rawJSON); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } + + rawWire, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) + return nil + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + v, err := u.unmarshalValue(m.NewField(fd), in, fd) + if err != nil { + return err + } + m.Set(fd, v) + return nil + case "Duration": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + d, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + sec := d.Nanoseconds() / 1e9 + nsec := d.Nanoseconds() % 1e9 + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Timestamp": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + sec := 
t.Unix() + nsec := t.Nanosecond() + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Value": + switch { + case string(in) == "null": + m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) + case string(in) == "true": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) + case string(in) == "false": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) + case hasPrefixAndSuffix('"', in, '"'): + s, err := unquoteString(string(in)) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) + case hasPrefixAndSuffix('[', in, ']'): + v := m.Mutable(fds.ByNumber(6)) + return u.unmarshalMessage(v.Message(), in) + case hasPrefixAndSuffix('{', in, '}'): + v := m.Mutable(fds.ByNumber(5)) + return u.unmarshalMessage(v.Message(), in) + default: + f, err := strconv.ParseFloat(string(in), 0) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) + } + return nil + case "ListValue": + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + lv := m.Mutable(fds.ByNumber(1)).List() + for _, raw := range jsonArray { + ve := lv.NewElement() + if err := u.unmarshalMessage(ve.Message(), raw); err != nil { + return err + } + lv.Append(ve) + } + return nil + case "Struct": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + mv := m.Mutable(fds.ByNumber(1)).Map() + for key, raw := range jsonObject { + kv := protoreflect.ValueOf(key).MapKey() + vv := mv.NewValue() + if err := u.unmarshalMessage(vv.Message(), raw); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) + } + mv.Set(kv, vv) + } + return nil + } + + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + // Handle known fields. + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if fd.IsWeak() && fd.Message().IsPlaceholder() { + continue // weak reference is not linked in + } + + // Search for any raw JSON value associated with this field. + var raw json.RawMessage + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + name = string(fd.JSONName()) + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + // Handle extension fields. + for name, raw := range jsonObject { + if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { + continue + } + + // Resolve the extension field by name. 
+ xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(md) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + continue + } + delete(jsonObject, name) + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + if !u.AllowUnknownFields && len(jsonObject) > 0 { + for name := range jsonObject { + return fmt.Errorf("unknown field %q in %v", name, md.FullName()) + } + } + return nil +} + +func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if md := fd.Message(); md != nil { + return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated + } + return false +} + +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + +func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch { + case fd.IsList(): + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return v, err + } + lv := v.List() + for _, raw := range jsonArray { + ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) + if err != nil { + return v, err + } + lv.Append(ve) + } + return v, nil + case fd.IsMap(): + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return v, err + } + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + for key, raw := range jsonObject { + var kv protoreflect.MapKey + if kfd.Kind() == protoreflect.StringKind { + kv = protoreflect.ValueOf(key).MapKey() + } else { + v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) + if err != nil { + return v, err + } + kv = v.MapKey() + } + + vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) + if err != nil { + return v, err + } + mv.Set(kv, vv) + } + return v, nil + default: + return u.unmarshalSingularValue(v, in, fd) + } +} + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(+1), + `"-Infinity"`: math.Inf(-1), +} + +func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + return unmarshalValue(in, new(bool)) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return unmarshalValue(trimQuote(in), new(int32)) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return unmarshalValue(trimQuote(in), new(int64)) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return unmarshalValue(trimQuote(in), new(uint32)) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return unmarshalValue(trimQuote(in), new(uint64)) + case protoreflect.FloatKind: + if f, ok 
:= nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat32(float32(f)), nil + } + return unmarshalValue(trimQuote(in), new(float32)) + case protoreflect.DoubleKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat64(float64(f)), nil + } + return unmarshalValue(trimQuote(in), new(float64)) + case protoreflect.StringKind: + return unmarshalValue(in, new(string)) + case protoreflect.BytesKind: + return unmarshalValue(in, new([]byte)) + case protoreflect.EnumKind: + if hasPrefixAndSuffix('"', in, '"') { + vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) + if vd == nil { + return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) + } + return protoreflect.ValueOfEnum(vd.Number()), nil + } + return unmarshalValue(in, new(protoreflect.EnumNumber)) + case protoreflect.MessageKind, protoreflect.GroupKind: + err := u.unmarshalMessage(v.Message(), in) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } +} + +func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { + err := json.Unmarshal(in, v) + return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err +} + +func unquoteString(in string) (out string, err error) { + err = json.Unmarshal([]byte(in), &out) + return out, err +} + +func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { + if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { + return true + } + return false +} + +// trimQuote is like unquoteString but simply strips surrounding quotes. +// This is incorrect, but is behavior done by the legacy implementation. +func trimQuote(in []byte) []byte { + if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { + in = in[1 : len(in)-1] + } + return in +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go new file mode 100644 index 000000000..685c80a62 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -0,0 +1,559 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONMarshalV2 = false + +// Marshaler is a configurable object for marshaling protocol buffer messages +// to the specified JSON representation. +type Marshaler struct { + // OrigName specifies whether to use the original protobuf name for fields. + OrigName bool + + // EnumsAsInts specifies whether to render enum values as integers, + // as opposed to string values. + EnumsAsInts bool + + // EmitDefaults specifies whether to render fields with zero values. + EmitDefaults bool + + // Indent controls whether the output is compact or not. + // If empty, the output is compact JSON. Otherwise, every JSON object + // entry and JSON array value will be on its own line. + // Each line will be preceded by repeated copies of Indent, where the + // number of copies is the current indentation depth. + Indent string + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. 
+ AnyResolver AnyResolver +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should also +// implement JSONPBUnmarshaler so that the custom format can be parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// Marshal serializes a protobuf message as JSON into w. +func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { + b, err := jm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// MarshalToString serializes a protobuf message as JSON in string form. +func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { + b, err := jm.marshal(m) + if err != nil { + return "", err + } + return string(b), nil +} + +func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { + v := reflect.ValueOf(m) + if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, errors.New("Marshal called with nil") + } + + // Check for custom marshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsm, ok := m.(JSONPBMarshaler); ok { + return jsm.MarshalJSONPB(jm) + } + + if wrapJSONMarshalV2 { + opts := protojson.MarshalOptions{ + UseProtoNames: jm.OrigName, + UseEnumNumbers: jm.EnumsAsInts, + EmitUnpopulated: jm.EmitDefaults, + Indent: jm.Indent, + } + if jm.AnyResolver != nil { + opts.Resolver = anyResolver{jm.AnyResolver} + } + return opts.Marshal(proto.MessageReflect(m).Interface()) + } else { + // Check for unpopulated required fields first. + m2 := proto.MessageReflect(m) + if err := protoV2.CheckInitialized(m2.Interface()); err != nil { + return nil, err + } + + w := jsonWriter{Marshaler: jm} + err := w.marshalMessage(m2, "", "") + return w.buf, err + } +} + +type jsonWriter struct { + *Marshaler + buf []byte +} + +func (w *jsonWriter) write(s string) { + w.buf = append(w.buf, s...) +} + +func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { + if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(w.Marshaler) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + w.write(string(b)) + return nil + } + + md := m.Descriptor() + fds := md.Fields() + + // Handle well-known types. 
+ const secondInNanos = int64(time.Second / time.Nanosecond) + switch wellKnownType(md.FullName()) { + case "Any": + return w.marshalAny(m, indent) + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + case "Duration": + const maxSecondsInDuration = 315576000000 + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns + } + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vs"`, x)) + return nil + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vZ"`, x)) + return nil + case "Value": + // JSON value; which is a null, number, string, bool, object, or array. + od := md.Oneofs().Get(0) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("nil Value") + } + return w.marshalValue(fd, m.Get(fd), indent) + case "Struct", "ListValue": + // JSON object or array. + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + + firstField := true + if typeURL != "" { + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue + } + } else { + i++ + } + + v := m.Get(fd) + + if !m.Has(fd) { + if !w.EmitDefaults || fd.ContainingOneof() != nil { + continue + } + if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { + v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars + } + } + + if !firstField { + w.writeComma() + } + if err := w.marshalField(fd, v, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if md.ExtensionRanges().Len() > 0 { + // Collect a sorted list of all extension descriptor and values. 
+ type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + if !firstField { + w.writeComma() + } + if err := w.marshalField(ext.desc, ext.val, indent); err != nil { + return err + } + firstField = false + } + } + + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) writeComma() { + if w.Indent != "" { + w.write(",\n") + } else { + w.write(",") + } +} + +func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + md := m.Descriptor() + typeURL := m.Get(md.Fields().ByNumber(1)).String() + rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() + + var m2 protoreflect.Message + if w.AnyResolver != nil { + mi, err := w.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + return err + } + m2 = mt.New() + } + + if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { + return err + } + + if wellKnownType(m2.Descriptor().FullName()) == "" { + return w.marshalMessage(m2, indent, typeURL) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + w.writeComma() + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + w.write(`"value": `) + } else { + w.write(`"value":`) + } + if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { + return err + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"@type":`) + if w.Indent != "" { + w.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + w.write(string(b)) + return nil +} + +// marshalField writes field description and value to the Writer. +func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"`) + switch { + case fd.IsExtension(): + // For message set, use the fname of the message as the extension name. 
+ name := string(fd.FullName()) + if isMessageSet(fd.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + w.write("[" + name + "]") + case w.OrigName: + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + w.write(name) + default: + w.write(string(fd.JSONName())) + } + w.write(`":`) + if w.Indent != "" { + w.write(" ") + } + return w.marshalValue(fd, v, indent) +} + +func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case fd.IsList(): + w.write("[") + comma := "" + lv := v.List() + for i := 0; i < lv.Len(); i++ { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write("]") + return nil + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + + // Collect a sorted list of all map keys and values. + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + + w.write(`{`) + comma := "" + for _, entry := range entries { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + + s := fmt.Sprint(entry.key.Interface()) + b, err := json.Marshal(s) + if err != nil { + return err + } + w.write(string(b)) + + w.write(`:`) + if w.Indent != "" { + w.write(` `) + } + + if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write(`}`) + return nil + default: + return w.marshalSingularValue(fd, v, indent) + } +} + +func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case !v.IsValid(): + w.write("null") + return nil + case fd.Message() != nil: + return w.marshalMessage(v.Message(), indent+w.Indent, "") + case fd.Enum() != nil: + if fd.Enum().FullName() == "google.protobuf.NullValue" { + w.write("null") + return nil + } + + vd := fd.Enum().Values().ByNumber(v.Enum()) + if vd == nil || w.EnumsAsInts { + w.write(strconv.Itoa(int(v.Enum()))) + } else { + w.write(`"` + string(vd.Name()) + `"`) + } + return nil + default: + switch v.Interface().(type) { + case float32, float64: + switch { + case math.IsInf(v.Float(), +1): + w.write(`"Infinity"`) + return nil + case math.IsInf(v.Float(), -1): + w.write(`"-Infinity"`) + return nil + case math.IsNaN(v.Float()): + w.write(`"NaN"`) + 
return nil + } + case int64, uint64: + w.write(fmt.Sprintf(`"%d"`, v.Interface())) + return nil + } + + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + w.write(string(b)) + return nil + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go new file mode 100644 index 000000000..480e2448d --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/json.go @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonpb provides functionality to marshal and unmarshal between a +// protocol buffer message and JSON. It follows the specification at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// Do not rely on the default behavior of the standard encoding/json package +// when called on generated message types as it does not operate correctly. +// +// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" +// package instead. +package jsonpb + +import ( + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// AnyResolver takes a type URL, present in an Any message, +// and resolves it into an instance of the associated message. +type AnyResolver interface { + Resolve(typeURL string) (proto.Message, error) +} + +type anyResolver struct{ AnyResolver } + +func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return r.FindMessageByURL(string(message)) +} + +func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + m, err := r.Resolve(url) + if err != nil { + return nil, err + } + return protoimpl.X.MessageTypeOf(m), nil +} + +func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +func wellKnownType(s protoreflect.FullName) string { + if s.Parent() == "google.protobuf" { + switch s.Name() { + case "Empty", "Any", + "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue", + "Duration", "Timestamp", + "NullValue", "Struct", "Value", "ListValue": + return string(s.Name()) + } + } + return "" +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index 3d45c1a47..f01eff318 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -315,7 +315,7 @@ func (tf Transform) Option() Option { return tf.trans } // pops the address from the stack. Thus, when traversing into a pointer from // reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles // by checking whether the pointer has already been visited. The cycle detection -// uses a seperate stack for the x and y values. +// uses a separate stack for the x and y values. 
// // If a cycle is detected we need to determine whether the two pointers // should be considered equal. The definition of equality chosen by Equal diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 168f92f3c..2ad3bc85b 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -7,6 +7,7 @@ package cmp import ( "bytes" "fmt" + "math" "reflect" "strconv" "strings" @@ -96,15 +97,16 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } // Auto-detect the type of the data. - var isLinedText, isText, isBinary bool var sx, sy string + var ssx, ssy []string + var isString, isMostlyText, isPureLinedText, isBinary bool switch { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() - isText = true // Initial estimate, verify later + isString = true case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): sx, sy = string(vx.Bytes()), string(vy.Bytes()) - isBinary = true // Initial estimate, verify later + isString = true case t.Kind() == reflect.Array: // Arrays need to be addressable for slice operations to work. vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() @@ -112,13 +114,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { vy2.Set(vy) vx, vy = vx2, vy2 } - if isText || isBinary { - var numLines, lastLineIdx, maxLineLen int - isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy) + if isString { + var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int for i, r := range sx + sy { - if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { - isBinary = true - break + numTotalRunes++ + if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError { + numValidRunes++ } if r == '\n' { if maxLineLen < i-lastLineIdx { @@ -128,8 +129,26 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { numLines++ } } - isText = !isBinary - isLinedText = isText && numLines >= 4 && maxLineLen <= 1024 + isPureText := numValidRunes == numTotalRunes + isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes)) + isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024 + isBinary = !isMostlyText + + // Avoid diffing by lines if it produces a significantly more complex + // edit script than diffing by bytes. + if isPureLinedText { + ssx = strings.Split(sx, "\n") + ssy = strings.Split(sy, "\n") + esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result { + return diff.BoolResult(ssx[ix] == ssy[iy]) + }) + esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result { + return diff.BoolResult(sx[ix] == sy[iy]) + }) + efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) + efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) + isPureLinedText = efficiencyLines < 4*efficiencyBytes + } } // Format the string into printable records. @@ -138,9 +157,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch { // If the text appears to be multi-lined text, // then perform differencing across individual lines. 
- case isLinedText: - ssx := strings.Split(sx, "\n") - ssy := strings.Split(sy, "\n") + case isPureLinedText: list = opts.formatDiffSlice( reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", func(v reflect.Value, d diffMode) textRecord { @@ -229,7 +246,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // If the text appears to be single-lined text, // then perform differencing in approximately fixed-sized chunks. // The output is printed as quoted strings. - case isText: + case isMostlyText: list = opts.formatDiffSlice( reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", func(v reflect.Value, d diffMode) textRecord { @@ -237,7 +254,6 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { return textRecord{Diff: d, Value: textLine(s)} }, ) - delim = "" // If the text appears to be binary data, // then perform differencing in approximately fixed-sized chunks. @@ -299,7 +315,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // Wrap the output with appropriate type information. var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} - if !isText { + if !isMostlyText { // The "{...}" byte-sequence literal is not valid Go syntax for strings. // Emit the type for extra clarity (e.g. "string{...}"). if t.Kind() == reflect.String { @@ -338,8 +354,11 @@ func (opts formatOptions) formatDiffSlice( vx, vy reflect.Value, chunkSize int, name string, makeRec func(reflect.Value, diffMode) textRecord, ) (list textList) { - es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { - return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + eq := func(ix, iy int) bool { + return vx.Index(ix).Interface() == vy.Index(iy).Interface() + } + es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { + return diff.BoolResult(eq(ix, iy)) }) appendChunks := func(v reflect.Value, d diffMode) int { @@ -364,6 +383,7 @@ func (opts formatOptions) formatDiffSlice( groups := coalesceAdjacentEdits(name, es) groups = coalesceInterveningIdentical(groups, chunkSize/4) + groups = cleanupSurroundingIdentical(groups, eq) maxGroup := diffStats{Name: name} for i, ds := range groups { if maxLen >= 0 && numDiffs >= maxLen { @@ -416,25 +436,36 @@ func (opts formatOptions) formatDiffSlice( // coalesceAdjacentEdits coalesces the list of edits into groups of adjacent // equal or unequal counts. 
+// +// Example: +// +// Input: "..XXY...Y" +// Output: [ +// {NumIdentical: 2}, +// {NumRemoved: 2, NumInserted 1}, +// {NumIdentical: 3}, +// {NumInserted: 1}, +// ] +// func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { - var prevCase int // Arbitrary index into which case last occurred - lastStats := func(i int) *diffStats { - if prevCase != i { + var prevMode byte + lastStats := func(mode byte) *diffStats { + if prevMode != mode { groups = append(groups, diffStats{Name: name}) - prevCase = i + prevMode = mode } return &groups[len(groups)-1] } for _, e := range es { switch e { case diff.Identity: - lastStats(1).NumIdentical++ + lastStats('=').NumIdentical++ case diff.UniqueX: - lastStats(2).NumRemoved++ + lastStats('!').NumRemoved++ case diff.UniqueY: - lastStats(2).NumInserted++ + lastStats('!').NumInserted++ case diff.Modified: - lastStats(2).NumModified++ + lastStats('!').NumModified++ } } return groups @@ -444,6 +475,35 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) // equal groups into adjacent unequal groups that currently result in a // dual inserted/removed printout. This acts as a high-pass filter to smooth // out high-frequency changes within the windowSize. +// +// Example: +// +// WindowSize: 16, +// Input: [ +// {NumIdentical: 61}, // group 0 +// {NumRemoved: 3, NumInserted: 1}, // group 1 +// {NumIdentical: 6}, // ├── coalesce +// {NumInserted: 2}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 9}, // └── coalesce +// {NumIdentical: 64}, // group 2 +// {NumRemoved: 3, NumInserted: 1}, // group 3 +// {NumIdentical: 6}, // ├── coalesce +// {NumInserted: 2}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 7}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 2}, // └── coalesce +// {NumIdentical: 63}, // group 4 +// ] +// Output: [ +// {NumIdentical: 61}, +// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, +// {NumIdentical: 64}, +// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, +// {NumIdentical: 63}, +// ] +// func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { @@ -463,3 +523,91 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat } return groups } + +// cleanupSurroundingIdentical scans through all unequal groups, and +// moves any leading sequence of equal elements to the preceding equal group and +// moves and trailing sequence of equal elements to the succeeding equal group. +// +// This is necessary since coalesceInterveningIdentical may coalesce edit groups +// together such that leading/trailing spans of equal elements becomes possible. +// Note that this can occur even with an optimal diffing algorithm. +// +// Example: +// +// Input: [ +// {NumIdentical: 61}, +// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements +// {NumIdentical: 67}, +// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements +// {NumIdentical: 54}, +// ] +// Output: [ +// {NumIdentical: 64}, // incremented by 3 +// {NumRemoved: 9}, +// {NumIdentical: 67}, +// {NumRemoved: 9}, +// {NumIdentical: 64}, // incremented by 10 +// ] +// +func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { + var ix, iy int // indexes into sequence x and y + for i, ds := range groups { + // Handle equal group. 
+ if ds.NumDiff() == 0 { + ix += ds.NumIdentical + iy += ds.NumIdentical + continue + } + + // Handle unequal group. + nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified + ny := ds.NumIdentical + ds.NumInserted + ds.NumModified + var numLeadingIdentical, numTrailingIdentical int + for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ { + numLeadingIdentical++ + } + for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ { + numTrailingIdentical++ + } + if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { + if numLeadingIdentical > 0 { + // Remove leading identical span from this group and + // insert it into the preceding group. + if i-1 >= 0 { + groups[i-1].NumIdentical += numLeadingIdentical + } else { + // No preceding group exists, so prepend a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...) + }() + } + // Increment indexes since the preceding group would have handled this. + ix += numLeadingIdentical + iy += numLeadingIdentical + } + if numTrailingIdentical > 0 { + // Remove trailing identical span from this group and + // insert it into the succeeding group. + if i+1 < len(groups) { + groups[i+1].NumIdentical += numTrailingIdentical + } else { + // No succeeding group exists, so append a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical}) + }() + } + // Do not increment indexes since the succeeding group will handle this. + } + + // Update this group since some identical elements were removed. + nx -= numIdentical + ny -= numIdentical + groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny} + } + ix += nx + iy += ny + } + return groups +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index dc200395c..41649d267 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -13,12 +13,42 @@ const ( compareGreater ) +var ( + intType = reflect.TypeOf(int(1)) + int8Type = reflect.TypeOf(int8(1)) + int16Type = reflect.TypeOf(int16(1)) + int32Type = reflect.TypeOf(int32(1)) + int64Type = reflect.TypeOf(int64(1)) + + uintType = reflect.TypeOf(uint(1)) + uint8Type = reflect.TypeOf(uint8(1)) + uint16Type = reflect.TypeOf(uint16(1)) + uint32Type = reflect.TypeOf(uint32(1)) + uint64Type = reflect.TypeOf(uint64(1)) + + float32Type = reflect.TypeOf(float32(1)) + float64Type = reflect.TypeOf(float64(1)) + + stringType = reflect.TypeOf("") +) + func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { + obj1Value := reflect.ValueOf(obj1) + obj2Value := reflect.ValueOf(obj2) + + // throughout this switch we try and avoid calling .Convert() if possible, + // as this has a pretty big performance impact switch kind { case reflect.Int: { - intobj1 := obj1.(int) - intobj2 := obj2.(int) + intobj1, ok := obj1.(int) + if !ok { + intobj1 = obj1Value.Convert(intType).Interface().(int) + } + intobj2, ok := obj2.(int) + if !ok { + intobj2 = obj2Value.Convert(intType).Interface().(int) + } if intobj1 > intobj2 { return compareGreater, true } @@ -31,8 +61,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Int8: { - int8obj1 := obj1.(int8) - 
int8obj2 := obj2.(int8) + int8obj1, ok := obj1.(int8) + if !ok { + int8obj1 = obj1Value.Convert(int8Type).Interface().(int8) + } + int8obj2, ok := obj2.(int8) + if !ok { + int8obj2 = obj2Value.Convert(int8Type).Interface().(int8) + } if int8obj1 > int8obj2 { return compareGreater, true } @@ -45,8 +81,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Int16: { - int16obj1 := obj1.(int16) - int16obj2 := obj2.(int16) + int16obj1, ok := obj1.(int16) + if !ok { + int16obj1 = obj1Value.Convert(int16Type).Interface().(int16) + } + int16obj2, ok := obj2.(int16) + if !ok { + int16obj2 = obj2Value.Convert(int16Type).Interface().(int16) + } if int16obj1 > int16obj2 { return compareGreater, true } @@ -59,8 +101,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Int32: { - int32obj1 := obj1.(int32) - int32obj2 := obj2.(int32) + int32obj1, ok := obj1.(int32) + if !ok { + int32obj1 = obj1Value.Convert(int32Type).Interface().(int32) + } + int32obj2, ok := obj2.(int32) + if !ok { + int32obj2 = obj2Value.Convert(int32Type).Interface().(int32) + } if int32obj1 > int32obj2 { return compareGreater, true } @@ -73,8 +121,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Int64: { - int64obj1 := obj1.(int64) - int64obj2 := obj2.(int64) + int64obj1, ok := obj1.(int64) + if !ok { + int64obj1 = obj1Value.Convert(int64Type).Interface().(int64) + } + int64obj2, ok := obj2.(int64) + if !ok { + int64obj2 = obj2Value.Convert(int64Type).Interface().(int64) + } if int64obj1 > int64obj2 { return compareGreater, true } @@ -87,8 +141,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint: { - uintobj1 := obj1.(uint) - uintobj2 := obj2.(uint) + uintobj1, ok := obj1.(uint) + if !ok { + uintobj1 = obj1Value.Convert(uintType).Interface().(uint) + } + uintobj2, ok := obj2.(uint) + if !ok { + uintobj2 = obj2Value.Convert(uintType).Interface().(uint) + } if uintobj1 > uintobj2 { return compareGreater, true } @@ -101,8 +161,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint8: { - uint8obj1 := obj1.(uint8) - uint8obj2 := obj2.(uint8) + uint8obj1, ok := obj1.(uint8) + if !ok { + uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8) + } + uint8obj2, ok := obj2.(uint8) + if !ok { + uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8) + } if uint8obj1 > uint8obj2 { return compareGreater, true } @@ -115,8 +181,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint16: { - uint16obj1 := obj1.(uint16) - uint16obj2 := obj2.(uint16) + uint16obj1, ok := obj1.(uint16) + if !ok { + uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16) + } + uint16obj2, ok := obj2.(uint16) + if !ok { + uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16) + } if uint16obj1 > uint16obj2 { return compareGreater, true } @@ -129,8 +201,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint32: { - uint32obj1 := obj1.(uint32) - uint32obj2 := obj2.(uint32) + uint32obj1, ok := obj1.(uint32) + if !ok { + uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32) + } + uint32obj2, ok := obj2.(uint32) + if !ok { + uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32) + } if uint32obj1 > uint32obj2 { return compareGreater, true } @@ -143,8 +221,14 @@ func compare(obj1, obj2 interface{}, 
kind reflect.Kind) (CompareType, bool) { } case reflect.Uint64: { - uint64obj1 := obj1.(uint64) - uint64obj2 := obj2.(uint64) + uint64obj1, ok := obj1.(uint64) + if !ok { + uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64) + } + uint64obj2, ok := obj2.(uint64) + if !ok { + uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64) + } if uint64obj1 > uint64obj2 { return compareGreater, true } @@ -157,8 +241,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Float32: { - float32obj1 := obj1.(float32) - float32obj2 := obj2.(float32) + float32obj1, ok := obj1.(float32) + if !ok { + float32obj1 = obj1Value.Convert(float32Type).Interface().(float32) + } + float32obj2, ok := obj2.(float32) + if !ok { + float32obj2 = obj2Value.Convert(float32Type).Interface().(float32) + } if float32obj1 > float32obj2 { return compareGreater, true } @@ -171,8 +261,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Float64: { - float64obj1 := obj1.(float64) - float64obj2 := obj2.(float64) + float64obj1, ok := obj1.(float64) + if !ok { + float64obj1 = obj1Value.Convert(float64Type).Interface().(float64) + } + float64obj2, ok := obj2.(float64) + if !ok { + float64obj2 = obj2Value.Convert(float64Type).Interface().(float64) + } if float64obj1 > float64obj2 { return compareGreater, true } @@ -185,8 +281,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.String: { - stringobj1 := obj1.(string) - stringobj2 := obj2.(string) + stringobj1, ok := obj1.(string) + if !ok { + stringobj1 = obj1Value.Convert(stringType).Interface().(string) + } + stringobj2, ok := obj2.(string) + if !ok { + stringobj2 = obj2Value.Convert(stringType).Interface().(string) + } if stringobj1 > stringobj2 { return compareGreater, true } @@ -240,6 +342,24 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) } +// Positive asserts that the specified element is positive +// +// assert.Positive(t, 1) +// assert.Positive(t, 1.23) +func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + zero := reflect.Zero(reflect.TypeOf(e)) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) +} + +// Negative asserts that the specified element is negative +// +// assert.Negative(t, -1) +// assert.Negative(t, -1.23) +func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + zero := reflect.Zero(reflect.TypeOf(e)) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) +} + func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 49370eb16..4dfd1229a 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -114,6 +114,24 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { return Error(t, err, append([]interface{}{msg}, args...)...) 
} +// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) +} + +// ErrorIsf asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...) +} + // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // @@ -321,6 +339,54 @@ func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsil return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) } +// IsDecreasingf asserts that the collection is decreasing +// +// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsDecreasing(t, object, append([]interface{}{msg}, args...)...) +} + +// IsIncreasingf asserts that the collection is increasing +// +// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsIncreasing(t, object, append([]interface{}{msg}, args...)...) +} + +// IsNonDecreasingf asserts that the collection is not decreasing +// +// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...) +} + +// IsNonIncreasingf asserts that the collection is not increasing +// +// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) +} + // IsTypef asserts that the specified objects are of the same type. func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -375,6 +441,17 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) 
} +// Negativef asserts that the specified element is negative +// +// assert.Negativef(t, -1, "error message %s", "formatted") +// assert.Negativef(t, -1.23, "error message %s", "formatted") +func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Negative(t, e, append([]interface{}{msg}, args...)...) +} + // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // @@ -476,6 +553,15 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } +// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -572,6 +658,17 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) } +// Positivef asserts that the specified element is positive +// +// assert.Positivef(t, 1, "error message %s", "formatted") +// assert.Positivef(t, 1.23, "error message %s", "formatted") +func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Positive(t, e, append([]interface{}{msg}, args...)...) +} + // Regexpf asserts that a specified regexp matches a string. // // assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 9db889427..25337a6f0 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -204,6 +204,42 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { return Error(a.t, err, msgAndArgs...) } +// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorAs(a.t, err, target, msgAndArgs...) +} + +// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorAsf(a.t, err, target, msg, args...) +} + +// ErrorIs asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorIs(a.t, err, target, msgAndArgs...) +} + +// ErrorIsf asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. 
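
// A minimal usage sketch for the assertions introduced in this testify bump
// (ErrorIs/ErrorAs/NotErrorIs, Positive/Negative, and the IsIncreasing family).
// The package name, test name, and error values below are illustrative
// assumptions, not part of the vendored library.
package example_test

import (
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewTestifyAssertions(t *testing.T) {
	base := errors.New("config missing")
	err := fmt.Errorf("load failed: %w", base)

	assert.ErrorIs(t, err, base)      // base is somewhere in err's chain
	assert.NotErrorIs(t, err, io.EOF) // io.EOF is not in err's chain

	// ErrorAs unwraps err into a concrete target type when possible.
	var pathErr *fs.PathError
	_, openErr := os.Open("/this/path/should/not/exist") // hypothetical path
	assert.ErrorAs(t, openErr, &pathErr)

	// Sign and ordering assertions added alongside the error helpers.
	assert.Positive(t, 42)
	assert.Negative(t, -0.5)
	assert.IsIncreasing(t, []int{1, 2, 3})
	assert.IsNonIncreasing(t, []string{"b", "a", "a"})
}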
+func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorIsf(a.t, err, target, msg, args...) +} + // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() @@ -631,6 +667,102 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) } +// IsDecreasing asserts that the collection is decreasing +// +// a.IsDecreasing([]int{2, 1, 0}) +// a.IsDecreasing([]float{2, 1}) +// a.IsDecreasing([]string{"b", "a"}) +func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsDecreasing(a.t, object, msgAndArgs...) +} + +// IsDecreasingf asserts that the collection is decreasing +// +// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") +// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") +func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsDecreasingf(a.t, object, msg, args...) +} + +// IsIncreasing asserts that the collection is increasing +// +// a.IsIncreasing([]int{1, 2, 3}) +// a.IsIncreasing([]float{1, 2}) +// a.IsIncreasing([]string{"a", "b"}) +func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsIncreasing(a.t, object, msgAndArgs...) +} + +// IsIncreasingf asserts that the collection is increasing +// +// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") +// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") +func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsIncreasingf(a.t, object, msg, args...) +} + +// IsNonDecreasing asserts that the collection is not decreasing +// +// a.IsNonDecreasing([]int{1, 1, 2}) +// a.IsNonDecreasing([]float{1, 2}) +// a.IsNonDecreasing([]string{"a", "b"}) +func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonDecreasing(a.t, object, msgAndArgs...) +} + +// IsNonDecreasingf asserts that the collection is not decreasing +// +// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") +func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonDecreasingf(a.t, object, msg, args...) +} + +// IsNonIncreasing asserts that the collection is not increasing +// +// a.IsNonIncreasing([]int{2, 1, 1}) +// a.IsNonIncreasing([]float{2, 1}) +// a.IsNonIncreasing([]string{"b", "a"}) +func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonIncreasing(a.t, object, msgAndArgs...) 
+} + +// IsNonIncreasingf asserts that the collection is not increasing +// +// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") +func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonIncreasingf(a.t, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -739,6 +871,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i return Lessf(a.t, e1, e2, msg, args...) } +// Negative asserts that the specified element is negative +// +// a.Negative(-1) +// a.Negative(-1.23) +func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Negative(a.t, e, msgAndArgs...) +} + +// Negativef asserts that the specified element is negative +// +// a.Negativef(-1, "error message %s", "formatted") +// a.Negativef(-1.23, "error message %s", "formatted") +func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Negativef(a.t, e, msg, args...) +} + // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // @@ -941,6 +1095,24 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) } +// NotErrorIs asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorIs(a.t, err, target, msgAndArgs...) +} + +// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorIsf(a.t, err, target, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1133,6 +1305,28 @@ func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) b return Panicsf(a.t, f, msg, args...) } +// Positive asserts that the specified element is positive +// +// a.Positive(1) +// a.Positive(1.23) +func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Positive(a.t, e, msgAndArgs...) +} + +// Positivef asserts that the specified element is positive +// +// a.Positivef(1, "error message %s", "formatted") +// a.Positivef(1.23, "error message %s", "formatted") +func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Positivef(a.t, e, msg, args...) +} + // Regexp asserts that a specified regexp matches a string. 
// // a.Regexp(regexp.MustCompile("start"), "it's starting") diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go new file mode 100644 index 000000000..1c3b47182 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -0,0 +1,81 @@ +package assert + +import ( + "fmt" + "reflect" +) + +// isOrdered checks that collection contains orderable elements. +func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { + objKind := reflect.TypeOf(object).Kind() + if objKind != reflect.Slice && objKind != reflect.Array { + return false + } + + objValue := reflect.ValueOf(object) + objLen := objValue.Len() + + if objLen <= 1 { + return true + } + + value := objValue.Index(0) + valueInterface := value.Interface() + firstValueKind := value.Kind() + + for i := 1; i < objLen; i++ { + prevValue := value + prevValueInterface := valueInterface + + value = objValue.Index(i) + valueInterface = value.Interface() + + compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) + + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) + } + + if !containsValue(allowedComparesResults, compareResult) { + return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...) + } + } + + return true +} + +// IsIncreasing asserts that the collection is increasing +// +// assert.IsIncreasing(t, []int{1, 2, 3}) +// assert.IsIncreasing(t, []float{1, 2}) +// assert.IsIncreasing(t, []string{"a", "b"}) +func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) +} + +// IsNonIncreasing asserts that the collection is not increasing +// +// assert.IsNonIncreasing(t, []int{2, 1, 1}) +// assert.IsNonIncreasing(t, []float{2, 1}) +// assert.IsNonIncreasing(t, []string{"b", "a"}) +func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) +} + +// IsDecreasing asserts that the collection is decreasing +// +// assert.IsDecreasing(t, []int{2, 1, 0}) +// assert.IsDecreasing(t, []float{2, 1}) +// assert.IsDecreasing(t, []string{"b", "a"}) +func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) +} + +// IsNonDecreasing asserts that the collection is not decreasing +// +// assert.IsNonDecreasing(t, []int{1, 1, 2}) +// assert.IsNonDecreasing(t, []float{1, 2}) +// assert.IsNonDecreasing(t, []string{"a", "b"}) +func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 914a10d83..bcac4401f 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -172,8 +172,8 @@ func isTest(name, prefix string) bool { if len(name) == len(prefix) { // 
"Test" is ok return true } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) + r, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(r) } func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { @@ -1622,6 +1622,7 @@ var spewConfig = spew.ConfigState{ DisableCapacities: true, SortKeys: true, DisableMethods: true, + MaxDepth: 10, } type tHelper interface { @@ -1693,3 +1694,81 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D } } } + +// ErrorIs asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if errors.Is(err, target) { + return true + } + + var expectedText string + if target != nil { + expectedText = target.Error() + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ + "expected: %q\n"+ + "in chain: %s", expectedText, chain, + ), msgAndArgs...) +} + +// NotErrorIs asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.Is(err, target) { + return true + } + + var expectedText string + if target != nil { + expectedText = target.Error() + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %q\n"+ + "in chain: %s", expectedText, chain, + ), msgAndArgs...) +} + +// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ + "expected: %q\n"+ + "in chain: %s", target, chain, + ), msgAndArgs...) 
+} + +func buildErrorChainString(err error) string { + if err == nil { + return "" + } + + e := errors.Unwrap(err) + chain := fmt.Sprintf("%q", err.Error()) + for e != nil { + chain += fmt.Sprintf("\n\t%q", e.Error()) + e = errors.Unwrap(e) + } + return chain +} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml deleted file mode 100644 index 5847d94e5..000000000 --- a/vendor/google.golang.org/grpc/.travis.yml +++ /dev/null @@ -1,42 +0,0 @@ -language: go - -matrix: - include: - - go: 1.14.x - env: VET=1 GO111MODULE=on - - go: 1.14.x - env: RACE=1 GO111MODULE=on - - go: 1.14.x - env: RUN386=1 - - go: 1.14.x - env: GRPC_GO_RETRY=on - - go: 1.14.x - env: TESTEXTRAS=1 - - go: 1.13.x - env: GO111MODULE=on - - go: 1.12.x - env: GO111MODULE=on - - go: 1.11.x # Keep until interop tests no longer require Go1.11 - env: GO111MODULE=on - -go_import_path: google.golang.org/grpc - -before_install: - - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi - - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi - - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi - - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi - -install: - - try3() { eval "$*" || eval "$*" || eval "$*"; } - - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi - - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi - -script: - - set -e - - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; security/advancedtls/examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi - - if [[ -n "${VET}" ]]; then ./vet.sh; fi - - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi - - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi - - make test diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index cd03f8c76..52338d004 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -53,9 +53,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 093c82b3a..c6672c0a3 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -8,17 +8,18 @@ See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIB for general contribution guidelines. 
## Maintainers (in alphabetical order) -- [canguler](https://github.com/canguler), Google LLC + - [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC - [menghanl](https://github.com/menghanl), Google LLC - [srini100](https://github.com/srini100), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez), Google LLC +- [canguler](https://github.com/canguler), Google LLC - [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC - [jtattermusch](https://github.com/jtattermusch), Google LLC - [lyuxuan](https://github.com/lyuxuan), Google LLC - [makmukhi](https://github.com/makmukhi), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 1f0722f16..1f8960922 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -41,8 +41,6 @@ vetdeps: clean \ proto \ test \ - testappengine \ - testappenginedeps \ testrace \ vet \ vetdeps diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt new file mode 100644 index 000000000..530197749 --- /dev/null +++ b/vendor/google.golang.org/grpc/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 3949a683f..0e6ae69a5 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -136,6 +136,6 @@ errors. [Go module]: https://github.com/golang/go/wiki/Modules [gRPC]: https://grpc.io [Go gRPC docs]: https://grpc.io/docs/languages/go -[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696 +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 [quick start]: https://grpc.io/docs/languages/go/quickstart [go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 3220d87be..ae13ddac1 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -25,55 +25,77 @@ // later release. package attributes -import "fmt" - // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own -// types for keys. +// types for keys. Values should not be modified after they are added to an +// Attributes or if they were received from one. If values implement 'Equal(o +// interface{}) bool', it will be called by (*Attributes).Equal to determine +// whether two values with the same key should be considered equal. 
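
// A minimal sketch of the reworked attributes API above: New now takes a single
// key/value pair, WithValue replaces the variadic WithValues, and Equal honors a
// value's own Equal method. The clusterKey and stringVal types and the main
// function are illustrative assumptions, not part of the gRPC package.
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// Keys should be package-private types so they cannot collide across packages.
type clusterKey struct{}

// stringVal implements Equal so (*Attributes).Equal compares it by content.
type stringVal struct{ s string }

func (v stringVal) Equal(o interface{}) bool {
	ov, ok := o.(stringVal)
	return ok && ov.s == v.s
}

func main() {
	a := attributes.New(clusterKey{}, stringVal{s: "cluster-a"})
	b := a.WithValue(clusterKey{}, stringVal{s: "cluster-b"}) // a is unchanged

	fmt.Println(a.Value(clusterKey{}).(stringVal).s) // cluster-a
	fmt.Println(b.Value(clusterKey{}).(stringVal).s) // cluster-b
	fmt.Println(a.Equal(b))                          // false: values differ per Equal
}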
type Attributes struct { m map[interface{}]interface{} } -// New returns a new Attributes containing all key/value pairs in kvs. If the -// same key appears multiple times, the last value overwrites all previous -// values for that key. Panics if len(kvs) is not even. -func New(kvs ...interface{}) *Attributes { - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) - } - a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} - for i := 0; i < len(kvs)/2; i++ { - a.m[kvs[i*2]] = kvs[i*2+1] - } - return a +// New returns a new Attributes containing the key/value pair. +func New(key, value interface{}) *Attributes { + return &Attributes{m: map[interface{}]interface{}{key: value}} } -// WithValues returns a new Attributes containing all key/value pairs in a and -// kvs. Panics if len(kvs) is not even. If the same key appears multiple -// times, the last value overwrites all previous values for that key. To -// remove an existing key, use a nil value. -func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair. If the same key appears multiple times, the +// last value overwrites all previous values for that key. To remove an +// existing key, use a nil value. value should not be modified later. +func (a *Attributes) WithValue(key, value interface{}) *Attributes { if a == nil { - return New(kvs...) + return New(key, value) } - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) - } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } - for i := 0; i < len(kvs)/2; i++ { - n.m[kvs[i*2]] = kvs[i*2+1] - } + n.m[key] = value return n } // Value returns the value associated with these attributes for key, or nil if -// no value is associated with key. +// no value is associated with key. The returned value should not be modified. func (a *Attributes) Value(key interface{}) interface{} { if a == nil { return nil } return a.m[key] } + +// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) +// bool' is implemented for a value in the attributes, it is called to +// determine if the value matches the one stored in the other attributes. If +// Equal is not implemented, standard equality is used to determine if the two +// values are equal. Note that some types (e.g. maps) aren't comparable by +// default, so they must be wrapped in a struct, or in an alias type, with Equal +// defined. +func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true + } + if a == nil || o == nil { + return false + } + if len(a.m) != len(o.m) { + return false + } + for k, v := range a.m { + ov, ok := o.m[k] + if !ok { + // o missing element of a + return false + } + if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if !eq.Equal(ov) { + return false + } + } else if v != ov { + // Fallback to a standard equality check if Value is unimplemented. 
+ return false + } + } + return true +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index ab531f4c0..f7a7697ca 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -75,24 +76,26 @@ func Get(name string) Builder { return nil } -// SubConn represents a gRPC sub connection. -// Each sub connection contains a list of addresses. gRPC will -// try to connect to them (in sequence), and stop trying the -// remainder once one connection is successful. +// A SubConn represents a single connection to a gRPC backend service. // -// The reconnect backoff will be applied on the list, not a single address. -// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// Each SubConn contains a list of addresses. // -// All SubConns start in IDLE, and will not try to connect. To trigger -// the connecting, Balancers must call Connect. -// When the connection encounters an error, it will reconnect immediately. -// When the connection becomes IDLE, it will not reconnect unless Connect is -// called. +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. // -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. @@ -172,25 +175,32 @@ type ClientConn interface { // BuildOptions contains additional information for Build. type BuildOptions struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. DialCreds credentials.TransportCredentials - // CredsBundle is the credentials bundle that the Balancer can use. + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. 
Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. CredsBundle credentials.Bundle - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. + // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. Dialer func(context.Context, string) (net.Conn, error) - // ChannelzParentID is the entity parent's channelz unique identification number. - ChannelzParentID int64 + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParentID is the parent ClientConn's channelz ID. + ChannelzParentID *channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. CustomUserAgent string - // Target contains the parsed address info of the dial target. It is the same resolver.Target as - // passed to the resolver. - // See the documentation for the resolver.Target type for details about what it contains. + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. Target resolver.Target } @@ -326,6 +336,20 @@ type Balancer interface { Close() } +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + // SubConnState describes the state of a SubConn. type SubConnState struct { // ConnectivityState is the connectivity state of the SubConn. @@ -353,8 +377,10 @@ var ErrBadResolverState = errors.New("bad resolver state") // // It's not thread safe. type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. } // RecordTransition records state change happening in subConn and based on that @@ -362,9 +388,11 @@ type ConnectivityStateEvaluator struct { // // - If at least one SubConn in Ready, the aggregated state is Ready; // - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else the aggregated state is TransientFailure. 
+// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else there are no subconns and the aggregated state is Transient Failure // -// Idle and Shutdown are not considered. +// Shutdown is not considered. func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { // Update counters. for idx, state := range []connectivity.State{oldState, newState} { @@ -374,6 +402,10 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne cse.numReady += updateVal case connectivity.Connecting: cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal } } @@ -384,5 +416,11 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne if cse.numConnecting > 0 { return connectivity.Connecting } + if cse.numTransientFailure > 0 { + return connectivity.TransientFailure + } + if cse.numIdle > 0 { + return connectivity.Idle + } return connectivity.TransientFailure } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index c883efa0b..a67074a3a 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" @@ -42,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: make(map[resolver.Address]subConnInfo), + subConns: resolver.NewAddressMap(), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, @@ -58,11 +57,6 @@ func (bb *baseBuilder) Name() string { return bb.name } -type subConnInfo struct { - subConn balancer.SubConn - attrs *attributes.Attributes -} - type baseBalancer struct { cc balancer.ClientConn pickerBuilder PickerBuilder @@ -70,7 +64,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]subConnInfo // `attributes` is stripped from the keys of this map (the addresses) + subConns *resolver.AddressMap scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -81,7 +75,7 @@ type baseBalancer struct { func (b *baseBalancer) ResolverError(err error) { b.resolverErr = err - if len(b.subConns) == 0 { + if b.subConns.Len() == 0 { b.state = connectivity.TransientFailure } @@ -105,52 +99,29 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. - addrsSet := make(map[resolver.Address]struct{}) + addrsSet := resolver.NewAddressMap() for _, a := range s.ResolverState.Addresses { - // Strip attributes from addresses before using them as map keys. So - // that when two addresses only differ in attributes pointers (but with - // the same attribute content), they are considered the same address. - // - // Note that this doesn't handle the case where the attribute content is - // different. 
So if users want to set different attributes to create - // duplicate connections to the same backend, it doesn't work. This is - // fine for now, because duplicate is done by setting Metadata today. - // - // TODO: read attributes to handle duplicate connections. - aNoAttrs := a - aNoAttrs.Attributes = nil - addrsSet[aNoAttrs] = struct{}{} - if scInfo, ok := b.subConns[aNoAttrs]; !ok { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - // - // When creating SubConn, the original address with attributes is - // passed through. So that connection configurations in attributes - // (like creds) will be used. sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes} + b.subConns.Set(a, sc) b.scStates[sc] = connectivity.Idle + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) sc.Connect() - } else { - // Always update the subconn's address in case the attributes - // changed. - // - // The SubConn does a reflect.DeepEqual of the new and old - // addresses. So this is a noop if the current address is the same - // as the old one (including attributes). - scInfo.attrs = a.Attributes - b.subConns[aNoAttrs] = scInfo - b.cc.UpdateAddresses(scInfo.subConn, []resolver.Address{a}) } } - for a, scInfo := range b.subConns { + for _, a := range b.subConns.Keys() { + sci, _ := b.subConns.Get(a) + sc := sci.(balancer.SubConn) // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { - b.cc.RemoveSubConn(scInfo.subConn) - delete(b.subConns, a) + if _, ok := addrsSet.Get(a); !ok { + b.cc.RemoveSubConn(sc) + b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. } @@ -192,10 +163,11 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. - for addr, scInfo := range b.subConns { - if st, ok := b.scStates[scInfo.subConn]; ok && st == connectivity.Ready { - addr.Attributes = scInfo.attrs - readySCs[scInfo.subConn] = SubConnInfo{Address: addr} + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) + sc := sci.(balancer.SubConn) + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} } } b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) @@ -213,10 +185,14 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } return } - if oldS == connectivity.TransientFailure && s == connectivity.Connecting { - // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or // CONNECTING transitions to prevent the aggregated state from being // always CONNECTING when many backends exist but are all down. 
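// Illustrative sketch, separate from this patch: a tiny standalone program
// showing how the aggregation rules documented on RecordTransition above play
// out now that TransientFailure and Idle are counted. It only uses the
// exported balancer.ConnectivityStateEvaluator API; the sequence of
// transitions is made up for illustration.
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}

	// Two new SubConns start in Idle; Shutdown stands in for "no previous
	// state", mirroring the RecordTransition call added to the base balancer.
	fmt.Println(cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)) // IDLE
	fmt.Println(cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)) // IDLE

	// One SubConn starts connecting: CONNECTING outranks IDLE.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting)) // CONNECTING

	// It becomes ready: READY outranks everything else.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.Ready)) // READY

	// The other SubConn fails; the aggregate stays READY while at least one
	// SubConn is still ready.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.TransientFailure)) // READY
}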
+ if s == connectivity.Idle { + sc.Connect() + } return } b.scStates[sc] = s @@ -242,7 +218,6 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su b.state == connectivity.TransientFailure { b.regeneratePicker() } - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } @@ -251,6 +226,11 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su func (b *baseBalancer) Close() { } +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + // NewErrPicker returns a Picker that always returns err on Pick(). func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index 50cc9da4a..cb4b3c203 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index a43d89641..6c3402e36 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -25,6 +25,7 @@ package grpclb import ( "context" "errors" + "fmt" "sync" "time" @@ -134,6 +135,7 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal lb := &lbBalancer{ cc: newLBCacheClientConn(cc), + dialTarget: opt.Target.Endpoint, target: opt.Target.Endpoint, opt: opt, fallbackTimeout: b.fallbackTimeout, @@ -163,9 +165,10 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal } type lbBalancer struct { - cc *lbCacheClientConn - target string - opt balancer.BuildOptions + cc *lbCacheClientConn + dialTarget string // user's dial target + target string // same as dialTarget unless overridden in service config + opt balancer.BuildOptions usePickFirst bool @@ -221,6 +224,7 @@ type lbBalancer struct { // when resolved address updates are received, and read in the goroutine // handling fallback. resolvedBackendAddrs []resolver.Address + connErr error // the last connection error } // regeneratePicker takes a snapshot of the balancer, and generates a picker from @@ -230,7 +234,7 @@ type lbBalancer struct { // Caller must hold lb.mu. func (lb *lbBalancer) regeneratePicker(resetDrop bool) { if lb.state == connectivity.TransientFailure { - lb.picker = &errPicker{err: balancer.ErrTransientFailure} + lb.picker = &errPicker{err: fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)} return } @@ -336,6 +340,8 @@ func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubCo // When an address was removed by resolver, b called RemoveSubConn but // kept the sc's state in scStates. Remove state for this sc here. 
delete(lb.scStates, sc) + case connectivity.TransientFailure: + lb.connErr = scs.ConnectionError } // Force regenerate picker if // - this sc became ready from not-ready @@ -394,6 +400,30 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { lb.mu.Lock() defer lb.mu.Unlock() + // grpclb uses the user's dial target to populate the `Name` field of the + // `InitialLoadBalanceRequest` message sent to the remote balancer. But when + // grpclb is used a child policy in the context of RLS, we want the `Name` + // field to be populated with the value received from the RLS server. To + // support this use case, an optional "target_name" field has been added to + // the grpclb LB policy's config. If specified, it overrides the name of + // the target to be sent to the remote balancer; if not, the target to be + // sent to the balancer will continue to be obtained from the target URI + // passed to the gRPC client channel. Whenever that target to be sent to the + // balancer is updated, we need to restart the stream to the balancer as + // this target is sent in the first message on the stream. + if gc != nil { + target := lb.dialTarget + if gc.ServiceName != "" { + target = gc.ServiceName + } + if target != lb.target { + lb.target = target + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.cancelRemoteBalancerCall() + } + } + } + newUsePickFirst := childIsPickFirst(gc) if lb.usePickFirst == newUsePickFirst { return @@ -484,3 +514,5 @@ func (lb *lbBalancer) Close() { } lb.cc.close() } + +func (lb *lbBalancer) ExitIdle() {} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go index aac371963..8942c3131 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -34,6 +34,7 @@ const ( type grpclbServiceConfig struct { serviceconfig.LoadBalancingConfig ChildPolicy *[]map[string]json.RawMessage + ServiceName string } func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index 5ac8d86bd..dab195941 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -33,8 +33,8 @@ import ( "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/channelz" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -135,11 +135,19 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback } if lb.usePickFirst { - var sc balancer.SubConn - for _, sc = range lb.subConns { + var ( + scKey resolver.Address + sc balancer.SubConn + ) + for scKey, sc = range lb.subConns { break } if sc != nil { + if len(backendAddrs) == 0 { + lb.cc.cc.RemoveSubConn(sc) + delete(lb.subConns, scKey) + return + } lb.cc.cc.UpdateAddresses(sc, backendAddrs) sc.Connect() return @@ -206,6 +214,9 @@ type remoteBalancerCCWrapper struct { backoff backoff.Strategy done chan struct{} + streamMu sync.Mutex + streamCancel func() + // waitgroup to wait for all goroutines to exit. 
wg sync.WaitGroup } @@ -217,7 +228,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { } else if bundle := lb.grpclbClientConnCreds; bundle != nil { dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) } else { - dopts = append(dopts, grpc.WithInsecure()) + dopts = append(dopts, grpc.WithTransportCredentials(insecure.NewCredentials())) } if lb.opt.Dialer != nil { dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer)) @@ -228,9 +239,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // Explicitly set pickfirst as the balancer. dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) - if channelz.IsOn() { - dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) - } + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) // Enable Keepalive for grpclb client. dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ @@ -319,10 +328,8 @@ func (ccw *remoteBalancerCCWrapper) sendLoadReport(s *balanceLoadClientStream, i } } -func (ccw *remoteBalancerCCWrapper) callRemoteBalancer() (backoff bool, _ error) { +func (ccw *remoteBalancerCCWrapper) callRemoteBalancer(ctx context.Context) (backoff bool, _ error) { lbClient := &loadBalancerClient{cc: ccw.cc} - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true)) if err != nil { return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) @@ -362,11 +369,43 @@ func (ccw *remoteBalancerCCWrapper) callRemoteBalancer() (backoff bool, _ error) return false, ccw.readServerList(stream) } +// cancelRemoteBalancerCall cancels the context used by the stream to the remote +// balancer. watchRemoteBalancer() takes care of restarting this call after the +// stream fails. +func (ccw *remoteBalancerCCWrapper) cancelRemoteBalancerCall() { + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + ccw.streamCancel() + ccw.streamCancel = nil + } + ccw.streamMu.Unlock() +} + func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { - defer ccw.wg.Done() + defer func() { + ccw.wg.Done() + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + // This is to make sure that we don't leak the context when we are + // directly returning from inside of the below `for` loop. + ccw.streamCancel() + ccw.streamCancel = nil + } + ccw.streamMu.Unlock() + }() + var retryCount int + var ctx context.Context for { - doBackoff, err := ccw.callRemoteBalancer() + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + ccw.streamCancel() + ccw.streamCancel = nil + } + ctx, ccw.streamCancel = context.WithCancel(context.Background()) + ccw.streamMu.Unlock() + + doBackoff, err := ccw.callRemoteBalancer(ctx) select { case <-ccw.done: return diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go index a24264a34..4ecfa1c21 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -39,7 +39,7 @@ type State struct { // Set returns a copy of the provided state with attributes containing s. s's // data should not be mutated after calling Set. 
func Set(state resolver.State, s *State) resolver.State { - state.Attributes = state.Attributes.WithValues(key, s) + state.Attributes = state.Attributes.WithValue(key, s) return state } diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 43c2a1537..274eb2f85 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -47,11 +47,11 @@ func init() { type rrPickerBuilder struct{} func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("roundrobinPicker: newPicker called with info: %v", info) + logger.Infof("roundrobinPicker: Build called with info: %v", info) if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } - var scs []balancer.SubConn + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) for sc := range info.ReadySCs { scs = append(scs, sc) } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 4cc7f9159..b1c23eaae 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -20,85 +20,178 @@ package grpc import ( "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} - -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - scBuffer *buffer.Unbounded - done *grpcsync.Event + cc *ClientConn + + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. + balancer *gracefulswitch.Balancer + curBalancerName string - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. + closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. 
} -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, - scBuffer: buffer.NewUnbounded(), + updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.scBuffer.Get(): - ccb.scBuffer.Load() - if ccb.done.HasFired() { + case u := <-ccb.updateCh.Get(): + ccb.updateCh.Load() + if ccb.closed.HasFired() { break } - ccb.balancerMu.Lock() - su := t.(*scStateUpdate) - ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) - ccb.balancerMu.Unlock() - case <-ccb.done.Done(): + switch update := u.(type) { + case *ccStateUpdate: + ccb.handleClientConnStateChange(update.ccs) + case *scStateUpdate: + ccb.handleSubConnStateChange(update) + case *exitIdleUpdate: + ccb.handleExitIdle() + case *resolverErrorUpdate: + ccb.handleResolverError(update.err) + case *switchToUpdate: + ccb.handleSwitchTo(update.name) + case *subConnUpdate: + ccb.handleRemoveSubConn(update.acbw) + default: + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) + } + case <-ccb.closed.Done(): } - if ccb.done.HasFired() { - ccb.balancer.Close() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) + if ccb.closed.HasFired() { + ccb.handleClose() return } } } -func (ccb *ccBalancerWrapper) close() { - ccb.done.Fire() +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +// +// Unlike other methods invoked by grpc to push updates to the underlying +// balancer, this method cannot simply push the update onto the update channel +// and return. It needs to return the error returned by the underlying balancer +// back to grpc which propagates that to the resolver. 
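// Simplified sketch, not the actual grpc-go types: the serialization scheme
// described above. Callers only enqueue typed updates; a single watcher
// goroutine owns the balancer and applies the updates in arrival order, so the
// balancer itself needs no locking. A plain buffered channel stands in for the
// internal unbounded buffer, and the update type names are local stand-ins.
package main

import "fmt"

type update interface{}

type scStateUpdate struct{ state string }
type switchToUpdate struct{ name string }

type wrapper struct {
	updateCh chan update   // the real wrapper uses *buffer.Unbounded
	finished chan struct{} // closed once the watcher has drained and exited
}

func newWrapper() *wrapper {
	w := &wrapper{updateCh: make(chan update, 16), finished: make(chan struct{})}
	go w.watcher()
	return w
}

// watcher applies updates strictly in the order they were enqueued.
func (w *wrapper) watcher() {
	defer close(w.finished)
	for u := range w.updateCh {
		switch u := u.(type) {
		case *switchToUpdate:
			fmt.Println("switch LB policy to:", u.name)
		case *scStateUpdate:
			fmt.Println("apply SubConn state:", u.state)
		default:
			fmt.Printf("unknown update type %T\n", u)
		}
	}
}

func main() {
	w := newWrapper()
	w.updateCh <- &switchToUpdate{name: "round_robin"}
	w.updateCh <- &scStateUpdate{state: "READY"}
	close(w.updateCh) // the real wrapper signals shutdown with a grpcsync.Event instead
	<-w.finished
}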
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) + + var res interface{} + select { + case res = <-ccb.resultCh.Get(): + ccb.resultCh.Load() + case <-ccb.closed.Done(): + // Return early if the balancer wrapper is closed while we are waiting for + // the underlying balancer to process a ClientConnState update. + return nil + } + // If the returned error is nil, attempting to type assert to error leads to + // panic. So, this needs to handled separately. + if res == nil { + return nil + } + return res.(error) +} + +// handleClientConnStateChange handles a ClientConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +// +// If the addresses specified in the update contain addresses of type "grpclb" +// and the selected LB policy is not "grpclb", these addresses will be filtered +// out and ccs will be modified with the updated address list. +func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { + if ccb.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) + } + ccs.ResolverState.Addresses = addrs + } + ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) } -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be // created. tearDown() generates a state change with Shutdown state, we @@ -109,58 +202,132 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co if sc == nil { return } - ccb.scBuffer.Put(&scStateUpdate{ + ccb.updateCh.Put(&scStateUpdate{ sc: sc, state: s, err: err, }) } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) +// handleSubConnStateChange handles a SubConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { + ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) +} + +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.updateCh.Put(&exitIdleUpdate{}) +} + +func (ccb *ccBalancerWrapper) handleExitIdle() { + if ccb.cc.GetState() != connectivity.Idle { + return + } + ccb.balancer.ExitIdle() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.balancerMu.Lock() + ccb.updateCh.Put(&resolverErrorUpdate{err: err}) +} + +func (ccb *ccBalancerWrapper) handleResolverError(err error) { ccb.balancer.ResolverError(err) - ccb.balancerMu.Unlock() +} + +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. 
Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.updateCh.Put(&switchToUpdate{name: name}) +} + +// handleSwitchTo handles a balancer switch update from the update channel. It +// calls the SwitchTo() method on the gracefulswitch.Balancer with a +// balancer.Builder corresponding to name. If no balancer.Builder is registered +// for the given name, it uses the default LB policy which is "pick_first". +func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { + // TODO: Other languages use case-insensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + + // TODO: Ensure that name is a registered LB policy when we get here. + // We currently only validate the `loadBalancingConfig` field. We need to do + // the same for the `loadBalancingPolicy` field and reject the service config + // if the specified policy is not registered. + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +// handleRemoveSucConn handles a request from the underlying balancer to remove +// a subConn. +// +// See comments in RemoveSubConn() for more details. +func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) close() { + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) handleClose() { + ccb.balancer.Close() + ccb.done.Fire() } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { if len(addrs) <= 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") - } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } acbw := &acBalancerWrapper{ac: ac} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it + // was required to handle the RemoveSubConn() method asynchronously by pushing + // the update onto the update channel. This was done to avoid a deadlock as + // switchBalancer() was holding cc.mu when calling Close() on the old + // balancer, which would in turn call RemoveSubConn(). 
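// Small usage sketch of the registry lookup that handleSwitchTo relies on: the
// public balancer.Get is consulted by name, and an unregistered name is what
// triggers the fallback to pick_first described above. The blank import of the
// roundrobin package registers "round_robin" as a side effect; the unknown
// policy name below is made up.
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin"
)

func main() {
	if b := balancer.Get("round_robin"); b != nil {
		fmt.Println("registered builder:", b.Name())
	}
	if balancer.Get("no_such_policy") == nil {
		fmt.Println("unregistered name: the wrapper falls back to pick_first")
	}
}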
+ // + // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this + // asynchronously is probably not required anymore since the switchTo() method + // handles the balancer switch by pushing the update onto the channel. + // TODO(easwars): Handle this inline. acbw, ok := sc.(*acBalancerWrapper) if !ok { return } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } - delete(ccb.subConns, acbw) - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -172,11 +339,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is @@ -226,17 +388,17 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { return } - ac, err := cc.newAddrConn(addrs, opts) + newAC, err := cc.newAddrConn(addrs, opts) if err != nil { channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return } - acbw.ac = ac - ac.mu.Lock() - ac.acbw = acbw - ac.mu.Unlock() + acbw.ac = newAC + newAC.mu.Lock() + newAC.acbw = acbw + newAC.mu.Unlock() if acState != connectivity.Idle { - ac.connect() + go newAC.connect() } } } @@ -244,7 +406,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { func (acbw *acBalancerWrapper) Connect() { acbw.mu.Lock() defer acbw.mu.Unlock() - acbw.ac.connect() + go acbw.ac.connect() } func (acbw *acBalancerWrapper) getAddrConn() *addrConn { diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 000000000..a220c47c5 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. 
+type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 24109264f..de6d41c23 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "math" + "net/url" "reflect" "strings" "sync" @@ -37,7 +38,6 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/grpcutil" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -79,17 +79,17 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") - // errTransportCredentialsMissing indicates that users want to transmit security - // information (e.g., OAuth2 token) which requires secure connection on an insecure - // connection. + // errNoTransportCredsInBundle indicated that the configured creds bundle + // returned a transport credentials which was nil. + errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials") + // errTransportCredentialsMissing indicates that users want to transmit + // security information (e.g., OAuth2 token) which requires secure + // connection on an insecure connection. errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") - // errCredentialsConflict indicates that grpc.WithTransportCredentials() - // and grpc.WithInsecure() are both called for a connection. 
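// Usage sketch for the requirement spelled out by the new errNoTransportSecurity
// message above: a plaintext connection now has to be requested explicitly with
// the insecure credentials package rather than the old grpc.WithInsecure().
// The backend address is a placeholder.
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer conn.Close()
	// grpc.Dial returns without waiting for a connection to be established.
}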
- errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") ) const ( @@ -159,35 +159,35 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") + pid := cc.dopts.channelzParentID + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, } - cc.csMgr.channelzID = cc.channelzID } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID - if !cc.dopts.insecure { - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - } else { - if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { - return nil, errCredentialsConflict - } + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return nil, errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { for _, cd := range cc.dopts.copts.PerRPCCredentials { if cd.RequireTransportSecurity() { return nil, errTransportCredentialsMissing @@ -248,38 +248,15 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Determine the resolver to use. - cc.parsedTarget = grpcutil.ParseTarget(cc.target, cc.dopts.copts.Dialer != nil) - channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) - resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original target. 
- channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) - } + resolverBuilder, err := cc.parseTargetAndFindResolver() + if err != nil { + return nil, err } - - creds := cc.dopts.copts.TransportCredentials - if creds != nil && creds.Info().ServerName != "" { - cc.authority = creds.Info().ServerName - } else if cc.dopts.insecure && cc.dopts.authority != "" { - cc.authority = cc.dopts.authority - } else if strings.HasPrefix(cc.target, "unix:") || strings.HasPrefix(cc.target, "unix-abstract:") { - cc.authority = "localhost" - } else if strings.HasPrefix(cc.parsedTarget.Endpoint, ":") { - cc.authority = "localhost" + cc.parsedTarget.Endpoint - } else { - // Use endpoint from "scheme://authority/endpoint" as the default - // authority for ClientConn. - cc.authority = cc.parsedTarget.Endpoint + cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) + if err != nil { + return nil, err } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) if cc.dopts.scChan != nil && !scSet { // Blocking wait for the initial service config. @@ -301,14 +278,15 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = balancer.BuildOptions{ + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, - } + }) // Build the resolver. rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) @@ -322,6 +300,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // A blocking dial blocks until the clientConn is ready. if cc.dopts.block { for { + cc.Connect() s := cc.GetState() if s == connectivity.Ready { break @@ -416,7 +395,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -482,34 +461,36 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. It also handles errors on established connections by // re-resolving the name and reconnecting. type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. 
+ balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error @@ -539,12 +520,24 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // // Experimental // -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle. Does not wait for the connection attempts to begin +// before returning. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { + cc.balancerWrapper.exitIdle() +} + func (cc *ClientConn) scWatcher() { for { select { @@ -622,9 +615,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. cc.mu.Unlock() @@ -632,7 +623,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var ret error - if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig(s.Addresses) + } else if s.ServiceConfig == nil { cc.maybeApplyDefaultServiceConfig(s.Addresses) // TODO: do we need to apply a failing LB policy if there is no // default, per the error handling design? 
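// Usage sketch for the new ClientConn.Connect API documented above. Connect,
// GetState and WaitForStateChange are all marked experimental; the backend
// address is a placeholder.
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer conn.Close()

	// Ask an idle channel to start connecting without blocking, then wait for
	// READY with a bounded timeout instead of dialing with WithBlock.
	conn.Connect()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	for state := conn.GetState(); state != connectivity.Ready; state = conn.GetState() {
		if !conn.WaitForStateChange(ctx, state) {
			log.Fatalf("channel never became READY; last state: %v", state)
		}
	}
	log.Println("channel is READY")
}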
@@ -649,16 +643,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) - } - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -666,24 +654,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -692,51 +668,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. // // Caller must hold cc.mu. 
-func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - cc.balancerWrapper.close() - } - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. @@ -759,17 +712,21 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} cc.mu.Unlock() return ac, nil @@ -840,21 +797,35 @@ func (ac *addrConn) connect() error { ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - // Start a goroutine connecting to the server asynchronously. - go ac.resetTransport() + ac.resetTransport() return nil } +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + // tryUpdateAddrs tries to update ac.addrs with the new addresses list. // -// If ac is Connecting, it returns false. 
The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// // If ac is TransientFailure, it updates ac.addrs and returns true. The updated // addresses will be picked up by retry in the next iteration after backoff. // // If ac is Shutdown or Idle, it updates ac.addrs and returns true. // +// If the addresses is the same as the old list, it does nothing and returns +// true. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. // - If true, it updates ac.addrs and returns true. The ac will keep using @@ -871,6 +842,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return true } + if equalAddresses(ac.addrs, addrs) { + return true + } + if ac.state == connectivity.Connecting { return false } @@ -878,6 +853,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { // ac.state is Ready, try to find the connected address. var curAddrFound bool for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) if reflect.DeepEqual(ac.curAddr, a) { curAddrFound = true break @@ -891,6 +867,26 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return curAddrFound } +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. +func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority +} + func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { if sc == nil { return MethodConfig{} @@ -930,14 +926,10 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil } func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { @@ -962,35 +954,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel cc.retryThrottler.Store((*retryThrottler)(nil)) } - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. 
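// Usage sketch for the precedence implemented in getServerName above: an
// authority supplied by the user through WithAuthority wins over any
// per-address ServerName set by the name resolver, which in turn wins over the
// channel's default authority. The target and override below are placeholders.
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("dns:///backend.internal:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Overrides the authority (and, with TLS credentials, the handshake
		// server name) for every address returned by the resolver.
		grpc.WithAuthority("placeholder.example.com"))
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer conn.Close()
}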
- var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } + cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1041,37 +1024,37 @@ func (cc *ClientConn) Close() error { rWrapper := cc.resolverWrapper cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil cc.mu.Unlock() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. cc.blockingpicker.close() - - if rWrapper != nil { - rWrapper.close() - } if bWrapper != nil { bWrapper.close() } + if rWrapper != nil { + rWrapper.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1101,7 +1084,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. 
+ channelzID *channelz.Identifier czData *channelzData } @@ -1130,112 +1113,86 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } func (ac *addrConn) resetTransport() { - for i := 0; ; i++ { - if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOptions{}) - } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return } + ac.updateConnectivityState(connectivity.TransientFailure, err) - addrs := ac.addrs - backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) - // This will be the duration that dial gets to finish. - dialDuration := minConnectTimeout - if ac.dopts.minConnectTimeout != nil { - dialDuration = ac.dopts.minConnectTimeout() - } - - if dialDuration < backoffFor { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor - } - // We can potentially spend all the time trying the first address, and - // if the server accepts the connection and then hangs, the following - // addresses will never be tried. - // - // The spec doesn't mention what should be done for multiple addresses. - // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm - connectDeadline := time.Now().Add(dialDuration) - - ac.updateConnectivityState(connectivity.Connecting, nil) - ac.transport = nil + // Backoff. + b := ac.resetBackoff ac.mu.Unlock() - newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) - if err != nil { - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - ac.updateConnectivityState(connectivity.TransientFailure, err) - - // Backoff. 
- b := ac.resetBackoff + ac.backoffIdx++ ac.mu.Unlock() - - timer := time.NewTimer(backoffFor) - select { - case <-timer.C: - ac.mu.Lock() - ac.backoffIdx++ - ac.mu.Unlock() - case <-b: - timer.Stop() - case <-ac.ctx.Done(): - timer.Stop() - return - } - continue + case <-b: + timer.Stop() + case <-ac.ctx.Done(): + timer.Stop() + return } ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - newTr.Close(fmt.Errorf("reached connectivity state: SHUTDOWN")) - return + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, err) } - ac.curAddr = addr - ac.transport = newTr - ac.backoffIdx = 0 - - hctx, hcancel := context.WithCancel(ac.ctx) - ac.startHealthCheck(hctx) ac.mu.Unlock() - - // Block until the created transport is down. And when this happens, - // we restart from the top of the addr list. - <-reconnect.Done() - hcancel() - // restart connecting - the top of the loop will set state to - // CONNECTING. This is against the current connectivity semantics doc, - // however it allows for graceful behavior for RPCs not yet dispatched - // - unfortunate timing would otherwise lead to the RPC failing even - // though the TRANSIENT_FAILURE state (called for by the doc) would be - // instantaneous. - // - // Ideally we should transition to Idle here and block until there is - // RPC activity that leads to the balancer requesting a reconnect of - // the associated SubConn. + return } + // Success; reset backoff. + ac.mu.Lock() + ac.backoffIdx = 0 + ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at the -// first successful one. It returns the transport, the address and a Event in -// the successful case. The Event fires when the returned transport disconnects. -func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { +// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// the first successful one. It returns an error if no address was successfully +// connected, or updates ac appropriately with the new transport. +func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() - return nil, resolver.Address{}, nil, errConnClosing + return errConnClosing } ac.cc.mu.RLock() @@ -1250,9 +1207,9 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(addr, copts, connectDeadline) if err == nil { - return newTr, addr, reconnect, nil + return nil } if firstConnErr == nil { firstConnErr = err @@ -1261,86 +1218,118 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } // Couldn't connect to any address. - return nil, resolver.Address{}, nil, firstConnErr + return firstConnErr } -// createTransport creates a connection to addr. It returns the transport and a -// Event in the successful case. The Event fires when the returned transport -// disconnects. 
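The reworked resetTransport above waits out the backoff period with a timer that can be cut short either by resetConnectBackoff or by the addrConn's context being cancelled. A minimal, self-contained sketch of that select pattern, with illustrative names rather than the real ones:

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForBackoff blocks until the backoff period elapses, the reset signal
// fires, or the context is cancelled. It reports whether the caller should
// retry the connection attempt.
func waitForBackoff(ctx context.Context, backoffFor time.Duration, resetBackoff <-chan struct{}) bool {
	timer := time.NewTimer(backoffFor)
	select {
	case <-timer.C:
		return true // backoff elapsed; the caller bumps its backoff index
	case <-resetBackoff:
		timer.Stop()
		return true // equivalent of ResetConnectBackoff: retry immediately
	case <-ctx.Done():
		timer.Stop()
		return false // the connection is shutting down
	}
}

func main() {
	reset := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(reset) // cut the 5s backoff short
	}()
	fmt.Println("retry:", waitForBackoff(context.Background(), 5*time.Second, reset))
}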
-func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { - prefaceReceived := make(chan struct{}) - onCloseCalled := make(chan struct{}) - reconnect := grpcsync.NewEvent() +// createTransport creates a connection to addr. It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + // TODO: Delete prefaceReceived and move the logic to wait for it into the + // transport. + prefaceReceived := grpcsync.NewEvent() + connClosed := grpcsync.NewEvent() - // addr.ServerName takes precedent over ClientConn authority, if present. - if addr.ServerName == "" { - addr.ServerName = ac.cc.authority - } + addr.ServerName = ac.cc.getServerName(addr) + hctx, hcancel := context.WithCancel(ac.ctx) + hcStarted := false // protected by ac.mu - once := sync.Once{} - onGoAway := func(r transport.GoAwayReason) { + onClose := func() { ac.mu.Lock() - ac.adjustParams(r) - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - reconnect.Fire() + defer ac.mu.Unlock() + defer connClosed.Fire() + defer hcancel() + if !hcStarted || hctx.Err() != nil { + // We didn't start the health check or set the state to READY, so + // no need to do anything else here. + // + // OR, we have already cancelled the health check context, meaning + // we have already called onClose once for this transport. In this + // case it would be dangerous to clear the transport and update the + // state, since there may be a new transport in this addrConn. + return + } + ac.transport = nil + // Refresh the name resolver + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, nil) + } } - onClose := func() { + onGoAway := func(r transport.GoAwayReason) { ac.mu.Lock() - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) + ac.adjustParams(r) ac.mu.Unlock() - close(onCloseCalled) - reconnect.Fire() - } - - onPrefaceReceipt := func() { - close(prefaceReceived) + onClose() } connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) - return nil, nil, err + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. 
Err: %v", addr, err) + return err } select { - case <-time.After(time.Until(connectDeadline)): + case <-connectCtx.Done(): // We didn't get the preface in time. - newTr.Close(fmt.Errorf("failed to receive server preface within timeout")) - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) - return nil, nil, errors.New("timed out waiting for server handshake") - case <-prefaceReceived: + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + newTr.Close(transport.ErrConnClosing) + if connectCtx.Err() == context.DeadlineExceeded { + err := errors.New("failed to receive server preface within timeout") + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) + return err + } + return nil + case <-prefaceReceived.Done(): // We got the preface - huzzah! things are good. - case <-onCloseCalled: - // The transport has already closed - noop. - return nil, nil, errors.New("connection closed") - // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + ac.mu.Lock() + defer ac.mu.Unlock() + if connClosed.HasFired() { + // onClose called first; go idle but do nothing else. + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, nil) + } + return nil + } + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + go newTr.Close(transport.ErrConnClosing) + return nil + } + ac.curAddr = addr + ac.transport = newTr + hcStarted = true + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil + case <-connClosed.Done(): + // The transport has already closed. If we received the preface, too, + // this is not an error. + select { + case <-prefaceReceived.Done(): + return nil + default: + return errors.New("connection closed before server preface received") + } } - return newTr, reconnect, nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1424,26 +1413,14 @@ func (ac *addrConn) resetConnectBackoff() { ac.mu.Unlock() } -// getReadyTransport returns the transport if ac's state is READY. -// Otherwise it returns nil, false. -// If ac's state is IDLE, it will trigger ac to connect. -func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { ac.mu.Lock() - if ac.state == connectivity.Ready && ac.transport != nil { - t := ac.transport - ac.mu.Unlock() - return t, true - } - var idle bool - if ac.state == connectivity.Idle { - idle = true - } - ac.mu.Unlock() - // Trigger idle ac to connect. 
- if idle { - ac.connect() + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport } - return nil, false + return nil } // tearDown starts to tear down the addrConn. @@ -1473,19 +1450,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } @@ -1594,3 +1570,114 @@ func (cc *ClientConn) connectionError() error { defer cc.lceMu.Unlock() return cc.lastConnectionError } + +func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { + channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + } else { + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + return rb, nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. + defScheme := resolver.GetDefaultScheme() + channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + return nil, err + } + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.Scheme) + if rb == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme) + } + cc.parsedTarget = parsedTarget + return rb, nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing scheme, authority and endpoint. Query +// params are stripped from the endpoint. +func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". 
So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field instead of the `Endpoint` field. + endpoint := u.Path + if endpoint == "" { + endpoint = u.Opaque + } + endpoint = strings.TrimPrefix(endpoint, "/") + return resolver.Target{ + Scheme: u.Scheme, + Authority: u.Host, + Endpoint: endpoint, + URL: *u, + }, nil +} + +// Determine channel authority. The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. + // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + switch { + case authorityFromDialOption != "": + return authorityFromDialOption, nil + case authorityFromCreds != "": + return authorityFromCreds, nil + case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): + // TODO: remove when the unix resolver implements optional interface to + // return channel authority. + return "localhost", nil + case strings.HasPrefix(endpoint, ":"): + return "localhost" + endpoint, nil + default: + // TODO: Define an optional interface on the resolver builder to return + // the channel authority given the user's dial target. For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. + return endpoint, nil + } +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go index 010156261..4a8992642 100644 --- a/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -18,7 +18,6 @@ // Package connectivity defines connectivity semantics. // For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. -// All APIs in this package are experimental. 
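To make the parseTarget behaviour above concrete, the standalone sketch below runs net/url over a few representative dial targets (the sample targets are illustrative). It also shows the caveat called out in the comment: stripping the leading "/" mangles "unix:///path" endpoints, which is why the unix resolver relies on the URL field instead.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	for _, target := range []string{
		"dns:///example.com:443",        // scheme with empty authority
		"passthrough:///10.0.0.1:50051", // default-scheme form used as fallback
		"unix:///tmp/grpc.sock",         // endpoint is an absolute path
	} {
		u, err := url.Parse(target)
		if err != nil {
			fmt.Println(target, "->", err)
			continue
		}
		endpoint := u.Path
		if endpoint == "" {
			endpoint = u.Opaque
		}
		// Same trimming as parseTarget: keeps existing resolvers working,
		// but loses the leading "/" of unix socket paths.
		endpoint = strings.TrimPrefix(endpoint, "/")
		fmt.Printf("scheme=%q authority=%q endpoint=%q\n", u.Scheme, u.Host, endpoint)
	}
}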
package connectivity import ( @@ -45,7 +44,7 @@ func (s State) String() string { return "SHUTDOWN" default: logger.Errorf("unknown connectivity state: %d", s) - return "Invalid-State" + return "INVALID_STATE" } } @@ -61,3 +60,35 @@ const ( // Shutdown indicates the ClientConn has started shutting down. Shutdown ) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. +type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. + ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go index 77d759cd9..2de2c4aff 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -24,6 +24,7 @@ import ( "sync" grpc "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) var ( @@ -49,7 +50,7 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { // Create a new connection to the handshaker service. Note that // this connection stays open until the application is closed. var err error - hsConn, err = hsDialer(hsAddress, grpc.WithInsecure()) + hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index a02c45828..fd55176b9 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 7eee7e4ec..96ff1877e 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -140,6 +140,11 @@ type TransportCredentials interface { // Additionally, ClientHandshakeInfo data will be available via the context // passed to this call. // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. 
Implementations must use this as the server name during the + // authentication handshake. + // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns @@ -153,9 +158,13 @@ type TransportCredentials interface { Info() ProtocolInfo // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials - // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. - // gRPC internals also use it to override the virtual hosting name if it is set. - // It must be called before dialing. Currently, this is only used by grpclb. + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. OverrideServerName(string) error } @@ -169,8 +178,18 @@ type TransportCredentials interface { // // This API is experimental. type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the // existing Bundle may cause races. // diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go index 265d193c7..fbdf7dc29 100644 --- a/vendor/google.golang.org/grpc/credentials/google/google.go +++ b/vendor/google.golang.org/grpc/credentials/google/google.go @@ -35,57 +35,63 @@ const tokenRequestTimeout = 30 * time.Second var logger = grpclog.Component("credentials") -// NewDefaultCredentials returns a credentials bundle that is configured to work -// with google services. +// DefaultCredentialsOptions constructs options to build DefaultCredentials. +type DefaultCredentialsOptions struct { + // PerRPCCreds is a per RPC credentials that is passed to a bundle. + PerRPCCreds credentials.PerRPCCredentials +} + +// NewDefaultCredentialsWithOptions returns a credentials bundle that is +// configured to work with google services. // // This API is experimental. 
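The Bundle contract documented above (the transport credentials must be non-nil, while per-RPC credentials may be nil) is small enough to illustrate with a hypothetical bundle built on insecure.NewCredentials(); this is a sketch, not one of the bundles gRPC ships:

package main

import (
	"fmt"

	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

// demoBundle satisfies credentials.Bundle with no transport security and no
// per-RPC credentials.
type demoBundle struct{ mode string }

func (demoBundle) TransportCredentials() credentials.TransportCredentials {
	return insecure.NewCredentials() // never nil, per the interface comment
}

func (demoBundle) PerRPCCredentials() credentials.PerRPCCredentials {
	return nil // allowed when per-RPC credentials are not needed
}

func (demoBundle) NewWithMode(mode string) (credentials.Bundle, error) {
	return demoBundle{mode: mode}, nil
}

func main() {
	var b credentials.Bundle = demoBundle{}
	fmt.Println(b.TransportCredentials().Info().SecurityProtocol) // "insecure"
}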
-func NewDefaultCredentials() credentials.Bundle { - c := &creds{ - newPerRPCCreds: func() credentials.PerRPCCredentials { - ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) - defer cancel() - perRPCCreds, err := oauth.NewApplicationDefault(ctx) - if err != nil { - logger.Warningf("google default creds: failed to create application oauth: %v", err) - } - return perRPCCreds - }, +func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credentials.Bundle { + if opts.PerRPCCreds == nil { + ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) + defer cancel() + var err error + opts.PerRPCCreds, err = newADC(ctx) + if err != nil { + logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) + } } + c := &creds{opts: opts} bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) if err != nil { - logger.Warningf("google default creds: failed to create new creds: %v", err) + logger.Warningf("NewDefaultCredentialsWithOptions: failed to create new creds: %v", err) } return bundle } +// NewDefaultCredentials returns a credentials bundle that is configured to work +// with google services. +// +// This API is experimental. +func NewDefaultCredentials() credentials.Bundle { + return NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{}) +} + // NewComputeEngineCredentials returns a credentials bundle that is configured to work // with google services. This API must only be used when running on GCE. Authentication configured // by this API represents the GCE VM's default service account. // // This API is experimental. func NewComputeEngineCredentials() credentials.Bundle { - c := &creds{ - newPerRPCCreds: func() credentials.PerRPCCredentials { - return oauth.NewComputeEngine() - }, - } - bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) - if err != nil { - logger.Warningf("compute engine creds: failed to create new creds: %v", err) - } - return bundle + return NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{ + PerRPCCreds: oauth.NewComputeEngine(), + }) } // creds implements credentials.Bundle. type creds struct { + opts DefaultCredentialsOptions + // Supported modes are defined in internal/internal.go. mode string - // The transport credentials associated with this bundle. + // The active transport credentials associated with this bundle. transportCreds credentials.TransportCredentials - // The per RPC credentials associated with this bundle. + // The active per RPC credentials associated with this bundle. perRPCCreds credentials.PerRPCCredentials - // Creates new per RPC credentials - newPerRPCCreds func() credentials.PerRPCCredentials } func (c *creds) TransportCredentials() credentials.TransportCredentials { @@ -106,14 +112,17 @@ var ( newALTS = func() credentials.TransportCredentials { return alts.NewClientCreds(alts.DefaultClientOptions()) } + newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) { + return oauth.NewApplicationDefault(ctx) + } ) // NewWithMode should make a copy of Bundle, and switch mode. Modifying the // existing Bundle may cause races. func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { newCreds := &creds{ - mode: mode, - newPerRPCCreds: c.newPerRPCCreds, + opts: c.opts, + mode: mode, } // Create transport credentials. 
@@ -129,7 +138,7 @@ func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { } if mode == internal.CredsBundleModeFallback || mode == internal.CredsBundleModeBackendFromBalancer { - newCreds.perRPCCreds = newCreds.newPerRPCCreds() + newCreds.perRPCCreds = newCreds.opts.PerRPCCreds } return newCreds, nil diff --git a/vendor/google.golang.org/grpc/credentials/google/xds.go b/vendor/google.golang.org/grpc/credentials/google/xds.go index 588c685e2..b8c2e8f92 100644 --- a/vendor/google.golang.org/grpc/credentials/google/xds.go +++ b/vendor/google.golang.org/grpc/credentials/google/xds.go @@ -21,18 +21,19 @@ package google import ( "context" "net" + "strings" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" ) -const cfeClusterName = "google-cfe" +const cfeClusterNamePrefix = "google_cfe_" // clusterTransportCreds is a combo of TLS + ALTS. // // On the client, ClientHandshake picks TLS or ALTS based on address attributes. // - if attributes has cluster name -// - if cluster name is "google_cfe", use TLS +// - if cluster name has prefix "google_cfe_", use TLS // - otherwise, use ALTS // - else, do TLS // @@ -55,7 +56,7 @@ func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority s return c.tls.ClientHandshake(ctx, authority, rawConn) } cn, ok := internal.GetXDSHandshakeClusterName(chi.Attributes) - if !ok || cn == cfeClusterName { + if !ok || strings.HasPrefix(cn, cfeClusterNamePrefix) { return c.tls.ClientHandshake(ctx, authority, rawConn) } // If attributes have cluster name, and cluster name is not cfe, it's a diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go new file mode 100644 index 000000000..82bee1443 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package insecure provides an implementation of the +// credentials.TransportCredentials interface which disables transport security. +package insecure + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// NewCredentials returns a credentials which disables transport security. +// +// Note that using this credentials with per-RPC credentials which require +// transport security is incompatible and will cause grpc.Dial() to fail. +func NewCredentials() credentials.TransportCredentials { + return insecureTC{} +} + +// insecureTC implements the insecure transport credentials. The handshake +// methods simply return the passed in net.Conn and set the security level to +// NoSecurity. 
+type insecureTC struct{} + +func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{SecurityProtocol: "insecure"} +} + +func (insecureTC) Clone() credentials.TransportCredentials { + return insecureTC{} +} + +func (insecureTC) OverrideServerName(string) error { + return nil +} + +// info contains the auth information for an insecure connection. +// It implements the AuthInfo interface. +type info struct { + credentials.CommonAuthInfo +} + +// AuthType returns the type of info as a string. +func (info) AuthType() string { + return "insecure" +} + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns an nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. +func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go index 852ae375c..c748fd21c 100644 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -23,6 +23,7 @@ import ( "context" "fmt" "io/ioutil" + "net/url" "sync" "golang.org/x/oauth2" @@ -56,6 +57,16 @@ func (ts TokenSource) RequireTransportSecurity() bool { return true } +// removeServiceNameFromJWTURI removes RPC service name from URI. +func removeServiceNameFromJWTURI(uri string) (string, error) { + parsed, err := url.Parse(uri) + if err != nil { + return "", err + } + parsed.Path = "/" + return parsed.String(), nil +} + type jwtAccess struct { jsonKey []byte } @@ -75,9 +86,15 @@ func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) } func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + // Remove RPC service name from URI that will be used as audience + // in a self-signed JWT token. It follows https://google.aip.dev/auth/4111. + aud, err := removeServiceNameFromJWTURI(uri[0]) + if err != nil { + return nil, err + } // TODO: the returned TokenSource is reusable. Store it in a sync.Map, with // uri as the key, to avoid recreating for every RPC. 
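The audience trimming done by removeServiceNameFromJWTURI above is easy to see in isolation; in the sketch below the sample URI is illustrative, and the logic mirrors the function: drop the RPC service name from the path so only scheme and host remain, per https://google.aip.dev/auth/4111.

package main

import (
	"fmt"
	"net/url"
)

// trimServiceName keeps only the scheme and host of a JWT audience URI, as the
// oauth change above does before minting a self-signed token.
func trimServiceName(uri string) (string, error) {
	parsed, err := url.Parse(uri)
	if err != nil {
		return "", err
	}
	parsed.Path = "/"
	return parsed.String(), nil
}

func main() {
	aud, err := trimServiceName("https://pubsub.googleapis.com/google.pubsub.v1.Publisher")
	if err != nil {
		panic(err)
	}
	fmt.Println(aud) // https://pubsub.googleapis.com/
}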
- ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, aud) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 8ee7124f2..784822d05 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -230,4 +230,7 @@ var cipherSuiteLookup = map[uint16]string{ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", + tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", + tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 7a497237b..f2f605a17 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -20,16 +20,15 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -45,20 +44,17 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. - balancerBuilder balancer.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -196,25 +192,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. 
// @@ -228,18 +205,14 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption { }) } -// WithConnectParams configures the dialer to use the provided ConnectParams. +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. // // The backoff configuration specified as part of the ConnectParams overrides // all defaults specified in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider // using the backoff.DefaultConfig as a base, in cases where you want to // override only a subset of the backoff configuration. -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func WithConnectParams(p ConnectParams) DialOption { return newFuncDialOption(func(o *dialOptions) { o.bs = internalbackoff.Exponential{Config: p.Backoff} @@ -277,7 +250,7 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { }) } -// WithBlock returns a DialOption which makes caller of Dial blocks until the +// WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. func WithBlock() DialOption { @@ -303,11 +276,17 @@ func WithReturnConnectionError() DialOption { } // WithInsecure returns a DialOption which disables transport security for this -// ClientConn. Note that transport security is required unless WithInsecure is -// set. +// ClientConn. Under the hood, it uses insecure.NewCredentials(). +// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.insecure = true + o.copts.TransportCredentials = insecure.NewCredentials() }) } @@ -482,8 +461,7 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt } // WithAuthority returns a DialOption that specifies the value to be used as the -// :authority pseudo-header. This value only works with WithInsecure and has no -// effect if TransportCredentials are present. +// :authority pseudo-header and as the server name in authentication handshake. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a @@ -498,7 +476,7 @@ func WithAuthority(a string) DialOption { // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. -func WithChannelzParentID(id int64) DialOption { +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) @@ -519,14 +497,16 @@ func WithDisableServiceConfig() DialOption { // WithDefaultServiceConfig returns a DialOption that configures the default // service config, which will be used in cases where: // -// 1. WithDisableServiceConfig is also used. -// 2. Resolver does not return a service config or if the resolver returns an -// invalid service config. +// 1. WithDisableServiceConfig is also used, or // -// Experimental +// 2. The name resolver does not provide a service config or provides an +// invalid service config. 
// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// The parameter s is the JSON representation of the default service config. +// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go func WithDefaultServiceConfig(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.defaultServiceConfigRawJSON = &s @@ -538,14 +518,8 @@ func WithDefaultServiceConfig(s string) DialOption { // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. // -// Retry support is currently disabled by default, but will be enabled by -// default in the future. Until then, it may be enabled by setting the -// environment variable "GRPC_GO_RETRY" to "on". -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Retry support is currently enabled by default, but may be disabled by +// setting the environment variable "GRPC_GO_RETRY" to "off". func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true @@ -585,7 +559,6 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - disableRetry: !envconfig.Retry, healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ WriteBufferSize: defaultWriteBufSize, diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 6d84f74c7..18e530fc9 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 4ee33171e..7c1f66409 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,11 +19,14 @@ package grpclog import ( + "encoding/json" + "fmt" "io" "io/ioutil" "log" "os" "strconv" + "strings" "google.golang.org/grpc/internal/grpclog" ) @@ -95,8 +98,9 @@ var severityName = []string{ // loggerT is the default logger used by grpclog. type loggerT struct { - m []*log.Logger - v int + m []*log.Logger + v int + jsonFormat bool } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -105,19 +109,32 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. 
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { + verbose int + jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { var m []*log.Logger - m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + flag := log.LstdFlags + if c.jsonFormat { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) - m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) - return &loggerT{m: m, v: v} + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -142,58 +159,79 @@ func newLoggerV2() LoggerV2 { if vl, err := strconv.Atoi(vLevel); err == nil { v = vl } - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) + + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ + verbose: v, + jsonFormat: jsonFormat, + }) +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) } func (g *loggerT) Info(args ...interface{}) { - g.m[infoLog].Print(args...) + g.output(infoLog, fmt.Sprint(args...)) } func (g *loggerT) Infoln(args ...interface{}) { - g.m[infoLog].Println(args...) + g.output(infoLog, fmt.Sprintln(args...)) } func (g *loggerT) Infof(format string, args ...interface{}) { - g.m[infoLog].Printf(format, args...) + g.output(infoLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Warning(args ...interface{}) { - g.m[warningLog].Print(args...) + g.output(warningLog, fmt.Sprint(args...)) } func (g *loggerT) Warningln(args ...interface{}) { - g.m[warningLog].Println(args...) + g.output(warningLog, fmt.Sprintln(args...)) } func (g *loggerT) Warningf(format string, args ...interface{}) { - g.m[warningLog].Printf(format, args...) + g.output(warningLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Error(args ...interface{}) { - g.m[errorLog].Print(args...) + g.output(errorLog, fmt.Sprint(args...)) } func (g *loggerT) Errorln(args ...interface{}) { - g.m[errorLog].Println(args...) + g.output(errorLog, fmt.Sprintln(args...)) } func (g *loggerT) Errorf(format string, args ...interface{}) { - g.m[errorLog].Printf(format, args...) + g.output(errorLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Fatal(args ...interface{}) { - g.m[fatalLog].Fatal(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) } func (g *loggerT) Fatalln(args ...interface{}) { - g.m[fatalLog].Fatalln(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). 
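When GRPC_GO_LOG_FORMATTER=json is set, the logger above drops the "SEVERITY: " prefix and the LstdFlags timestamp and emits one JSON object per line instead. A standalone sketch of just that formatting path (not the grpclog API itself):

package main

import (
	"encoding/json"
	"log"
	"os"
)

func main() {
	logger := log.New(os.Stderr, "", 0) // flag 0, as in newLoggerV2WithConfig
	b, _ := json.Marshal(map[string]string{
		"severity": "INFO",
		"message":  "Channel created",
	})
	logger.Output(2, string(b)) // {"message":"Channel created","severity":"INFO"}
}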
+ g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) } func (g *loggerT) Fatalf(format string, args ...interface{}) { - g.m[fatalLog].Fatalf(format, args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) } func (g *loggerT) V(l int) bool { @@ -210,12 +248,12 @@ func (g *loggerT) V(l int) bool { // later release. type DepthLoggerV2 interface { LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh deleted file mode 100644 index 15ff9facd..000000000 --- a/vendor/google.golang.org/grpc/install_gae.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -TMP=$(mktemp -d /tmp/sdk.XXX) \ -&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ -&& unzip -q $TMP.zip -d $TMP \ -&& export PATH="$PATH:$TMP/go_appengine" \ No newline at end of file diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 668e0adcf..bb96ef57b 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -72,9 +72,12 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 000000000..7ba8f4d18 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,382 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. 
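Before the implementation that follows, a toy sketch of the swap rule the wrapper applies (simplified names and states; the real package is internal and not importable from user code): the pending policy is promoted as soon as it reports anything other than CONNECTING, or sooner if the current policy is no longer READY.

package main

import "fmt"

type state int

const (
	connecting state = iota
	ready
)

type switcher struct {
	current, pending string
	currentState     state
}

// updatePending mirrors the pending-balancer branch of
// balancerWrapper.UpdateState further below.
func (s *switcher) updatePending(pendingState state) {
	if pendingState == connecting && s.currentState == ready {
		fmt.Printf("keeping %s while %s is still connecting\n", s.current, s.pending)
		return
	}
	fmt.Printf("swapping %s -> %s\n", s.current, s.pending)
	s.current, s.pending = s.pending, ""
	s.currentState = pendingState
}

func main() {
	s := &switcher{current: "pick_first", pending: "round_robin", currentState: ready}
	s.updatePending(connecting) // no swap: current is READY, pending not ready yet
	s.updatePending(ready)      // swap
}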
+func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. 
+func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + bw.gsb.cc.RemoveSubConn(sc) + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. 
+ bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + bw.gsb.cc.RemoveSubConn(sc) + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. 
+ if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 5cc3aeddb..0a25ce43f 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -31,7 +31,7 @@ import ( // Logger is the global binary logger. It can be used to get binary logger for // each method. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be @@ -49,17 +49,24 @@ func SetLogger(l Logger) { binLogger = l } +// GetLogger gets the binarg logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + // GetMethodLogger returns the methodLogger for the given methodName. // // methodName should be in the format of "/service/method". // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +75,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. The map fields need to be filled in @@ -88,57 +107,57 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // // New methodLogger with same service overrides the old one. 
-func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // // New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } @@ -148,23 +167,23 @@ func (l *logger) setBlacklist(method string) error { // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. 
-func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return newMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return newMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return newMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index d8f4e7602..ab589a76b 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0cdb41831..24df0a1a0 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -48,7 +48,11 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +type MethodLogger interface { + Log(LogEntryConfig) +} + +type methodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,8 +61,8 @@ type MethodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +func newMethodLogger(h, m uint64) *methodLogger { + return &methodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -69,8 +73,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { } } -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal only method for building the proto message out of the +// input event. 
It's made public to enable other library to reuse as much logic +// in methodLogger as possible. +func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -85,11 +91,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { case *pb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *methodLogger) Log(c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -119,7 +129,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index 7d7a3056b..c2fdd58b3 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -69,7 +69,8 @@ type writerSink struct { func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { - grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err) + grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) + return err } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) @@ -85,24 +86,27 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { func (ws *writerSink) Close() error { return nil } type bufferedSink struct { - mu sync.Mutex - closer io.Closer - out Sink // out is built on buf. - buf *bufio.Writer // buf is kept for flush. - - writeStartOnce sync.Once - writeTicker *time.Ticker + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + flusherStarted bool + + writeTicker *time.Ticker + done chan struct{} } func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { - // Start the write loop when Write is called. - fs.writeStartOnce.Do(fs.startFlushGoroutine) fs.mu.Lock() + defer fs.mu.Unlock() + if !fs.flusherStarted { + // Start the write loop when Write is called. 
+ fs.startFlushGoroutine() + fs.flusherStarted = true + } if err := fs.out.Write(e); err != nil { - fs.mu.Unlock() return err } - fs.mu.Unlock() return nil } @@ -113,7 +117,12 @@ const ( func (fs *bufferedSink) startFlushGoroutine() { fs.writeTicker = time.NewTicker(bufFlushDuration) go func() { - for range fs.writeTicker.C { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } fs.mu.Lock() if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) @@ -124,10 +133,12 @@ func (fs *bufferedSink) startFlushGoroutine() { } func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() if fs.writeTicker != nil { fs.writeTicker.Stop() } - fs.mu.Lock() + close(fs.done) if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) } @@ -137,7 +148,6 @@ func (fs *bufferedSink) Close() error { if err := fs.out.Close(); err != nil { grpclogLogger.Warningf("failed to close the Sink: %v", err) } - fs.mu.Unlock() return nil } @@ -155,5 +165,6 @@ func NewBufferedSink(o io.WriteCloser) Sink { closer: o, out: newWriterSink(bufW), buf: bufW, + done: make(chan struct{}), } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index f73141393..777cbcd79 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,6 +24,8 @@ package channelz import ( + "context" + "errors" "fmt" "sort" "sync" @@ -49,7 +51,8 @@ var ( // TurnOn turns on channelz data collection. func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + idGen.reset() atomic.StoreInt32(&curState, 1) } } @@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. 
+func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) idGen.reset() + return func() error { - var err error cm := db.get() if cm == nil { return nil } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { return nil } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) + <-ticker.C } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err } } @@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid, ref) - } else { - db.get().addChannel(id, cn, false, pid, ref) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. 
-func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") } id := idGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, - pid: pid, + pid: pid.Int(), trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid, ref) - return id + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { id := idGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + svr := &server{ refName: ref, s: s, @@ -244,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 { id: id, } db.get().addServer(id, svr) - return id + return newIdentifer(RefServer, id, nil) } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a ListenSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") } id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid, ref) - return id + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil } // RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent +// with ref as its reference name, and adds it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a NormalSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") } id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid, ref) - return id + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil } -// RemoveEntry removes an entry with unique channelz trakcing id to be id from +// RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { - for d := desc; d != nil; d = d.Parent { - switch d.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, d.Desc) - case CtWarning: - l.WarningDepth(depth+1, d.Desc) - case CtError: - l.ErrorDepth(depth+1, d.Desc) - } +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottom most entity. + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) } + if getMaxTraceEntry() == 0 { return } - db.get().traceEvent(id, desc) + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } } // channelMap is the storage data structure for channelz. 
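For orientation, a minimal sketch of how a call site inside grpc-go might adopt the *Identifier-based registration API shown above. The package name and the channel value c are hypothetical, and because channelz is an internal package the snippet only compiles from within the google.golang.org/grpc module.

package channelzexample // hypothetical package inside google.golang.org/grpc

import (
	"google.golang.org/grpc/internal/channelz"
)

// registerSketch walks the new registration flow. The value c is assumed to
// satisfy the channelz.Channel interface.
func registerSketch(c channelz.Channel) error {
	// A nil parent marks a top-level channel (previously pid == 0).
	chanID := channelz.RegisterChannel(c, nil, "example-channel")

	// Sub-channels must have a parent; a nil parent now surfaces as an error
	// instead of being logged and silently dropped.
	subID, err := channelz.RegisterSubChannel(c, chanID, "example-subchannel")
	if err != nil {
		return err
	}

	// Identifiers are opaque, but the numeric id is still reachable.
	_ = subID.Int()

	// Removal is keyed by the identifier and is a no-op when channelz is off.
	channelz.RemoveEntry(subID)
	channelz.RemoveEntry(chanID)
	return nil
}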
@@ -326,6 +367,17 @@ type channelMap struct { normalSockets map[int64]*normalSocket } +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + func (c *channelMap) addServer(id int64, s *server) { c.mu.Lock() s.cm = c @@ -333,7 +385,7 @@ func (c *channelMap) addServer(id int64, s *server) { c.mu.Unlock() } -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { c.mu.Lock() cn.cm = c cn.trace.cm = c @@ -346,7 +398,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in c.mu.Unlock() } -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { c.mu.Lock() sc.cm = c sc.trace.cm = c @@ -355,7 +407,7 @@ func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref stri c.mu.Unlock() } -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { c.mu.Lock() ls.cm = c c.listenSockets[id] = ls @@ -363,7 +415,7 @@ func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref c.mu.Unlock() } -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { c.mu.Lock() ns.cm = c c.normalSockets[id] = ns @@ -630,7 +682,7 @@ func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) if count == 0 { end = true } - var s []*SocketMetric + s := make([]*SocketMetric, 0, len(sks)) for _, ns := range sks { sm := &SocketMetric{} sm.SocketData = ns.s.ChannelzMetric() diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 000000000..c9a27acd3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. 
+// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index b0013f9c8..8e13a3d2c 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) 
- if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 3c595d154..ad0ce4dab 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go index 692dd6181..1b1c4cce3 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. 
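A companion sketch of the reworked channelz logging helpers from logging.go above, which now take an *Identifier and always delegate to AddTraceEvent. The DepthLoggerV2 parameter is the interface from the public grpclog package that these helpers accept, and the package name is again hypothetical.

package channelzexample // hypothetical package inside google.golang.org/grpc

import (
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/channelz"
)

// logSketch emits log lines prefixed with the entity, e.g. "[Channel #1] ...";
// when channelz is on and tracing is enabled, the same text is also recorded
// as a trace event of the corresponding severity on the entity id.
func logSketch(l grpclog.DepthLoggerV2, id *channelz.Identifier) {
	channelz.Infof(l, id, "Subchannel created")
	channelz.Warningf(l, id, "resolver returned an empty address list")
	channelz.Errorf(l, id, "connection error: %v", "connection refused")
}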
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go index 19c2fc521..8b06eed1a 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * @@ -37,6 +38,6 @@ type SocketOptionData struct { // Windows OS doesn't support Socket Option func (s *SocketOptionData) Getsockopt(fd uintptr) { once.Do(func() { - logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.") + logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index fdf409d55..8d194e44e 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -1,5 +1,3 @@ -// +build linux,!appengine - /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 8864a0811..837ddc402 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go index be70b6cdf..25ade6230 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2020 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go index f499a614c..2919632d6 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go index 55664fa46..f792fd22c 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/util.go +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -18,7 +18,9 @@ package credentials -import "crypto/tls" +import ( + "crypto/tls" +) const alpnProtoStrH2 = "h2" diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 73931a94b..6f0272543 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -26,13 +26,10 @@ import ( const ( prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" ) var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". - Retry = strings.EqualFold(os.Getenv(retryStr), "on") // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). 
TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go new file mode 100644 index 000000000..7d996e51b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -0,0 +1,101 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( + "os" + "strings" +) + +const ( + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable XDSBootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file + // content. Do not use this and read from env directly. Its value is read + // and kept in variable XDSBootstrapFileContent. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" + + ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" + clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" + aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" + federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" + rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" + + c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" +) + +var ( + // XDSBootstrapFileName holds the name of the file which contains xDS + // bootstrap configuration. Users can specify the location of the bootstrap + // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) + // XDSBootstrapFileContent holds the content of the xDS bootstrap + // configuration. Users can specify the bootstrap config by setting the + // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) + // XDSRingHash indicates whether ring hash support is enabled, which can be + // disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". + XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") + // XDSClientSideSecurity is used to control processing of security + // configuration on the client-side. 
+ // + // Note that there is no env var protection for the server-side because we + // have a brand new API on the server-side and users explicitly need to use + // the new API to get security integration on the server. + XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") + // XDSAggregateAndDNS indicates whether processing of aggregated cluster + // and DNS cluster is enabled, which can be enabled by setting the + // environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to + // "true". + XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + + // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, + // which can be disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". + XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") + // XDSOutlierDetection indicates whether outlier detection support is + // enabled, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true". + XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true") + // XDSFederation indicates whether federation support is enabled. + XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") + + // XDSRLS indicates whether processing of Cluster Specifier plugins and + // support for the RLS CLuster Specifier is enabled, which can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to + // "true". + XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") + + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) +) diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go index d6c9e03fc..6717b757f 100644 --- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go @@ -20,13 +20,6 @@ package googlecloud import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "regexp" "runtime" "strings" "sync" @@ -35,43 +28,9 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const ( - linuxProductNameFile = "/sys/class/dmi/id/product_name" - windowsCheckCommand = "powershell.exe" - windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" - powershellOutputFilter = "Manufacturer" - windowsManufacturerRegex = ":(.*)" - - logPrefix = "[googlecloud]" -) +const logPrefix = "[googlecloud]" var ( - // The following two variables will be reassigned in tests. 
- runningOS = runtime.GOOS - manufacturerReader = func() (io.Reader, error) { - switch runningOS { - case "linux": - return os.Open(linuxProductNameFile) - case "windows": - cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) - out, err := cmd.Output() - if err != nil { - return nil, err - } - for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { - if strings.HasPrefix(line, powershellOutputFilter) { - re := regexp.MustCompile(windowsManufacturerRegex) - name := re.FindString(line) - name = strings.TrimLeft(name, ":") - return strings.NewReader(name), nil - } - } - return nil, errors.New("cannot determine the machine's manufacturer") - default: - return nil, fmt.Errorf("%s is not supported", runningOS) - } - } - vmOnGCEOnce sync.Once vmOnGCE bool @@ -84,21 +43,21 @@ var ( // package. We keep this to avoid depending on the cloud library module. func OnGCE() bool { vmOnGCEOnce.Do(func() { - vmOnGCE = isRunningOnGCE() + mf, err := manufacturer() + if err != nil { + logger.Infof("failed to read manufacturer, setting onGCE=false: %v") + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) }) return vmOnGCE } -// isRunningOnGCE checks whether the local system, without doing a network request is +// isRunningOnGCE checks whether the local system, without doing a network request, is // running on GCP. -func isRunningOnGCE() bool { - manufacturer, err := readManufacturer() - if err != nil { - logger.Infof("failed to read manufacturer %v, returning OnGCE=false", err) - return false - } +func isRunningOnGCE(manufacturer []byte, goos string) bool { name := string(manufacturer) - switch runningOS { + switch goos { case "linux": name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" @@ -111,18 +70,3 @@ func isRunningOnGCE() bool { return false } } - -func readManufacturer() ([]byte, error) { - reader, err := manufacturerReader() - if err != nil { - return nil, err - } - if reader == nil { - return nil, errors.New("got nil reader") - } - manufacturer, err := ioutil.ReadAll(reader) - if err != nil { - return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err) - } - return manufacturer, nil -} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go similarity index 72% rename from vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go rename to vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go index a6144cd66..ffa0f1dde 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go @@ -1,8 +1,9 @@ -// +build appengine +//go:build !(linux || windows) +// +build !linux,!windows /* * - * Copyright 2018 gRPC authors. + * Copyright 2022 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,13 +19,8 @@ * */ -package credentials +package googlecloud -import ( - "net" -) - -// WrapSyscallConn returns newConn on appengine. 
-func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - return newConn +func manufacturer() ([]byte, error) { + return nil, nil } diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go similarity index 71% rename from vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go rename to vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go index af6f57719..e53b8ffc8 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go @@ -1,8 +1,6 @@ -// +build appengine - /* * - * Copyright 2020 gRPC authors. + * Copyright 2022 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,14 +16,12 @@ * */ -package credentials +package googlecloud + +import "io/ioutil" -import ( - "crypto/tls" - "net/url" -) +const linuxProductNameFile = "/sys/class/dmi/id/product_name" -// SPIFFEIDFromState is a no-op for appengine builds. -func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { - return nil +func manufacturer() ([]byte, error) { + return ioutil.ReadFile(linuxProductNameFile) } diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go new file mode 100644 index 000000000..2d7aaaaa7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index e6f975cbf..30a3b4258 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -115,12 +115,12 @@ type LoggerV2 interface { // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
type DepthLoggerV2 interface { - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 200b115ca..740f83c2b 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -31,26 +31,37 @@ var ( mu sync.Mutex ) +// Int implements rand.Int on the grpcrand global source. +func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + // Int63n implements rand.Int63n on the grpcrand global source. func Int63n(n int64) int64 { mu.Lock() - res := r.Int63n(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Int63n(n) } // Intn implements rand.Intn on the grpcrand global source. func Intn(n int) int { mu.Lock() - res := r.Intn(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Intn(n) } // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() - res := r.Float64() - mu.Unlock() - return res + defer mu.Unlock() + return r.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { + mu.Lock() + defer mu.Unlock() + return r.Uint64() } diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go similarity index 67% rename from vendor/google.golang.org/grpc/internal/resolver/dns/go113.go rename to vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go index 8783a8cf8..e2f948e8f 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -1,8 +1,6 @@ -// +build go1.13 - /* * - * Copyright 2019 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,16 +16,5 @@ * */ -package dns - -import "net" - -func init() { - filterError = func(err error) error { - if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { - // The name does not exist; not an error. - return nil - } - return err - } -} +// Package grpcutil provides utility functions used across the gRPC codebase. 
+package grpcutil diff --git a/vendor/google.golang.org/grpc/credentials/go12.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go similarity index 58% rename from vendor/google.golang.org/grpc/credentials/go12.go rename to vendor/google.golang.org/grpc/internal/grpcutil/regex.go index ccbf35b33..7a092b2b8 100644 --- a/vendor/google.golang.org/grpc/credentials/go12.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -1,8 +1,6 @@ -// +build go1.12 - /* * - * Copyright 2019 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,13 +16,16 @@ * */ -package credentials +package grpcutil -import "crypto/tls" +import "regexp" -// This init function adds cipher suite constants only defined in Go 1.12. -func init() { - cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" - cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" - cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" +// FullMatchWithRegex returns whether the full text matches the regex provided. +func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) + } + re.Longest() + rem := re.FindString(text) + return len(rem) == len(text) } diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go deleted file mode 100644 index 8833021da..000000000 --- a/vendor/google.golang.org/grpc/internal/grpcutil/target.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcutil provides a bunch of utility functions to be used across the -// gRPC codebase. -package grpcutil - -import ( - "strings" - - "google.golang.org/grpc/resolver" -) - -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// ParseTarget splits target into a resolver.Target struct containing scheme, -// authority and endpoint. skipUnixColonParsing indicates that the parse should -// not parse "unix:[path]" cases. This should be true in cases where a custom -// dialer is present, to prevent a behavior change. -// -// If target is not a valid scheme://authority/endpoint as specified in -// https://github.com/grpc/grpc/blob/master/doc/naming.md, -// it returns {Endpoint: target}. 
-func ParseTarget(target string, skipUnixColonParsing bool) (ret resolver.Target) { - var ok bool - if strings.HasPrefix(target, "unix-abstract:") { - if strings.HasPrefix(target, "unix-abstract://") { - // Maybe, with Authority specified, try to parse it - var remain string - ret.Scheme, remain, _ = split2(target, "://") - ret.Authority, ret.Endpoint, ok = split2(remain, "/") - if !ok { - // No Authority, add the "//" back - ret.Endpoint = "//" + remain - } else { - // Found Authority, add the "/" back - ret.Endpoint = "/" + ret.Endpoint - } - } else { - // Without Authority specified, split target on ":" - ret.Scheme, ret.Endpoint, _ = split2(target, ":") - } - return ret - } - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - if strings.HasPrefix(target, "unix:") && !skipUnixColonParsing { - // Handle the "unix:[local/path]" and "unix:[/absolute/path]" cases, - // because splitting on :// only handles the - // "unix://[/absolute/path]" case. Only handle if the dialer is nil, - // to avoid a behavior change with custom dialers. - return resolver.Target{Scheme: "unix", Endpoint: target[len("unix:"):]} - } - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - if ret.Scheme == "unix" { - // Add the "/" back in the unix case, so the unix resolver receives the - // actual endpoint in the "unix://[/absolute/path]" case. - ret.Endpoint = "/" + ret.Endpoint - } - return ret -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 1b596bf35..6d355b0b0 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -38,11 +38,10 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and - // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the // there's difference in white space. EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool @@ -86,3 +85,9 @@ const ( // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. 
+const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index 302262613..b2980f8ac 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -22,6 +22,9 @@ package metadata import ( + "fmt" + "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -30,14 +33,38 @@ type mdKeyType string const mdKey = mdKeyType("grpc.internal.address.metadata") +type mdValue metadata.MD + +func (m mdValue) Equal(o interface{}) bool { + om, ok := o.(mdValue) + if !ok { + return false + } + if len(m) != len(om) { + return false + } + for k, v := range m { + ov := om[k] + if len(ov) != len(v) { + return false + } + for i, ve := range v { + if ov[i] != ve { + return false + } + } + } + return true +} + // Get returns the metadata of addr. func Get(addr resolver.Address) metadata.MD { attrs := addr.Attributes if attrs == nil { return nil } - md, _ := attrs.Value(mdKey).(metadata.MD) - return md + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) } // Set sets (overrides) the metadata in addr. @@ -45,6 +72,49 @@ func Get(addr resolver.Address) metadata.MD { // When a SubConn is created with this address, the RPCs sent on it will all // have this metadata. func Set(addr resolver.Address, md metadata.MD) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(mdKey, md) + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } + +// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. +func Validate(md metadata.MD) error { + for k, vals := range md { + // pseudo-header will be ignored + if k[0] == ':' { + continue + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(k); i++ { + r := k[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) + } + } + if strings.HasSuffix(k, "-bin") { + continue + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) + } + } + } + return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // for i that saving a conversion if not using for range + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 000000000..0177af4b5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pretty defines helper functions to pretty-print structs for logging.
+package pretty
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"github.com/golang/protobuf/jsonpb"
+	protov1 "github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/encoding/protojson"
+	protov2 "google.golang.org/protobuf/proto"
+)
+
+const jsonIndent = "  "
+
+// ToJSON marshals the input into a json string.
+//
+// If marshaling fails, it falls back to fmt.Sprintf("%+v").
+func ToJSON(e interface{}) string {
+	switch ee := e.(type) {
+	case protov1.Message:
+		mm := jsonpb.Marshaler{Indent: jsonIndent}
+		ret, err := mm.MarshalToString(ee)
+		if err != nil {
+			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+			// messages are not imported, and this will fail because the message
+			// is not found.
+			return fmt.Sprintf("%+v", ee)
+		}
+		return ret
+	case protov2.Message:
+		mm := protojson.MarshalOptions{
+			Multiline: true,
+			Indent:    jsonIndent,
+		}
+		ret, err := mm.Marshal(ee)
+		if err != nil {
+			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+			// messages are not imported, and this will fail because the message
+			// is not found.
+			return fmt.Sprintf("%+v", ee)
+		}
+		return string(ret)
+	default:
+		ret, err := json.MarshalIndent(ee, "", jsonIndent)
+		if err != nil {
+			return fmt.Sprintf("%+v", ee)
+		}
+		return string(ret)
+	}
+}
+
+// FormatJSON formats the input json bytes with indentation.
+//
+// If Indent fails, it returns the unchanged input as a string.
+func FormatJSON(b []byte) string {
+	var out bytes.Buffer
+	err := json.Indent(&out, b, "", jsonIndent)
+	if err != nil {
+		return string(b)
+	}
+	return out.String()
+}
diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
index 5e7f36703..c7a18a948 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
@@ -117,9 +117,12 @@ type ClientInterceptor interface {
 	NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
 }
 
-// ServerInterceptor is unimplementable; do not use.
+// ServerInterceptor is an interceptor for incoming RPCs on the gRPC server side.
 type ServerInterceptor interface {
-	notDefined()
+	// AllowRPC checks whether an incoming RPC is allowed to proceed, based on
+	// information about the connection the RPC was received on and its HTTP
+	// headers. This information is piped into the context.
+	AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting.
 }
 
 type csKeyType string
@@ -129,7 +132,7 @@ const csKey = csKeyType("grpc.internal.resolver.configSelector")
 
 // SetConfigSelector sets the config selector in state and returns the new
 // state.
func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { - state.Attributes = state.Attributes.WithValues(csKey, cs) + state.Attributes = state.Attributes.WithValue(csKey, cs) return state } diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 03825bbe7..75301c514 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -277,18 +277,13 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { return newAddrs, nil } -var filterError = func(err error) error { +func handleDNSError(err error, lookupType string) error { if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). return nil } - return err -} - -func handleDNSError(err error, lookupType string) error { - err = filterError(err) if err != nil { err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) logger.Info(err) @@ -323,12 +318,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { } func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { - var newAddrs []resolver.Address addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { err = handleDNSError(err, "A") return nil, err } + newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { ip, ok := formatIP(a) if !ok { diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 0d5a811dd..20852e59d 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -37,7 +37,17 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv if target.Authority != "" { return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) } - addr := resolver.Address{Addr: target.Endpoint} + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. 
+ endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} if b.scheme == unixAbstractScheme { // prepend "\x00" to address for unix-abstract addr.Addr = "\x00" + addr.Addr diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index c0634d152..badbdbf59 100644 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -78,6 +78,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { return err } + var names []string for i, lbcfg := range ir { if len(lbcfg) != 1 { return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) @@ -92,6 +93,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { for name, jsonCfg = range lbcfg { } + names = append(names, name) builder := balancer.Get(name) if builder == nil { // If the balancer is not registered, move on to the next config. @@ -120,7 +122,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { // return. This means we had a loadBalancingConfig slice but did not // encounter a registered policy. The config is considered invalid in this // case. - return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) } // MethodConfig defines the configuration recommended by the service providers for a diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 710223b8d..e5c6513ed 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -97,7 +97,7 @@ func (s *Status) Err() error { if s.Code() == codes.OK { return nil } - return &Error{e: s.Proto()} + return &Error{s: s} } // WithDetails returns a new status with the provided details messages appended to the status. @@ -136,19 +136,23 @@ func (s *Status) Details() []interface{} { return details } +func (s *Status) String() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + // Error wraps a pointer of a status proto. It implements error and Status, // and a nil *Error should never be returned by this package. type Error struct { - e *spb.Status + s *Status } func (e *Error) Error() string { - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) + return e.s.String() } // GRPCStatus returns the Status represented by se. func (e *Error) GRPCStatus() *Status { - return FromProto(e.e) + return e.s } // Is implements future error.Is functionality. @@ -158,5 +162,5 @@ func (e *Error) Is(target error) bool { if !ok { return false } - return proto.Equal(e.e, tse.e) + return proto.Equal(e.s.s, tse.s.s) } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go index 4b2964f2a..b3a72276d 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. 
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index 7913ef1db..999f52cd7 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -1,4 +1,5 @@
-// +build !linux appengine
+//go:build !linux
+// +build !linux
 
 /*
  *
@@ -35,41 +36,41 @@ var logger = grpclog.Component("core")
 
 func log() {
 	once.Do(func() {
-		logger.Info("CPU time info is unavailable on non-linux or appengine environment.")
+		logger.Info("CPU time info is unavailable on non-linux environments.")
 	})
 }
 
-// GetCPUTime returns the how much CPU time has passed since the start of this process.
-// It always returns 0 under non-linux or appengine environment.
+// GetCPUTime returns how much CPU time has passed since the start of this
+// process. It always returns 0 under non-linux environments.
 func GetCPUTime() int64 {
 	log()
 	return 0
 }
 
-// Rusage is an empty struct under non-linux or appengine environment.
+// Rusage is an empty struct under non-linux environments.
 type Rusage struct{}
 
-// GetRusage is a no-op function under non-linux or appengine environment.
+// GetRusage is a no-op function under non-linux environments.
 func GetRusage() *Rusage {
 	log()
 	return nil
}
 
 // CPUTimeDiff returns the differences of user CPU time and system CPU time used
-// between two Rusage structs. It a no-op function for non-linux or appengine environment.
+// between two Rusage structs. It is a no-op function for non-linux environments.
 func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
 	log()
 	return 0, 0
 }
 
-// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
+// SetTCPUserTimeout is a no-op function under non-linux environments.
 func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
 	log()
 	return nil
 }
 
-// GetTCPUserTimeout is a no-op function under non-linux or appengine environments
-// a negative return value indicates the operation is not supported
+// GetTCPUserTimeout is a no-op function under non-linux environments.
+// A negative return value indicates the operation is not supported.
 func GetTCPUserTimeout(conn net.Conn) (int, error) {
 	log()
 	return -1, nil
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index f63a01376..244f4b081 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -133,9 +133,11 @@ type cleanupStream struct {
 func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
 
 type earlyAbortStream struct {
+	httpStatus     uint32
 	streamID       uint32
 	contentSubtype string
 	status         *status.Status
+	rst            bool
 }
 
 func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
@@ -296,7 +298,7 @@ type controlBuffer struct {
 	// closed and nilled when transportResponseFrames drops below the
 	// threshold. Both fields are protected by mu.
 	transportResponseFrames int
-	trfChan                 atomic.Value // *chan struct{}
+	trfChan                 atomic.Value // chan struct{}
 }
 
 func newControlBuffer(done <-chan struct{}) *controlBuffer {
@@ -310,10 +312,10 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer {
 // throttle blocks if there are too many incomingSettings/cleanupStreams in the
 // controlbuf.
func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(*chan struct{}) + ch, _ := c.trfChan.Load().(chan struct{}) if ch != nil { select { - case <-*ch: + case <-ch: case <-c.done: } } @@ -347,8 +349,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - ch := make(chan struct{}) - c.trfChan.Store(&ch) + c.trfChan.Store(make(chan struct{})) } } c.mu.Unlock() @@ -389,9 +390,9 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are removing the frame that put us over the // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(*chan struct{}) - close(*ch) - c.trfChan.Store((*chan struct{})(nil)) + ch := c.trfChan.Load().(chan struct{}) + close(ch) + c.trfChan.Store((chan struct{})(nil)) } c.transportResponseFrames-- } @@ -407,7 +408,6 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - c.finish() return nil, ErrConnClosing } } @@ -432,6 +432,14 @@ func (c *controlBuffer) finish() { hdr.onOrphaned(ErrConnClosing) } } + // In case throttle() is currently in flight, it needs to be unblocked. + // Otherwise, the transport may not close, since the transport is closed by + // the reader encountering the connection error. + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + close(ch) + } + c.trfChan.Store((chan struct{})(nil)) c.mu.Unlock() } @@ -765,9 +773,12 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if l.side == clientSide { return errors.New("earlyAbortStream not handled on client") } - + // In case the caller forgets to set the http status, default to 200. + if eas.httpStatus == 0 { + eas.httpStatus = 200 + } headerFields := []hpack.HeaderField{ - {Name: ":status", Value: "200"}, + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, @@ -776,6 +787,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { return err } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } return nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index f262edd8e..97198c515 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -136,12 +136,10 @@ type inFlow struct { // newLimit updates the inflow window to a new value n. // It assumes that n is always greater than the old limit. 
-func (f *inFlow) newLimit(n uint32) uint32 { +func (f *inFlow) newLimit(n uint32) { f.mu.Lock() - d := n - f.limit f.limit = n f.mu.Unlock() - return d } func (f *inFlow) maybeAdjust(n uint32) uint32 { diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 05d3871e6..1c3459c2b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -141,9 +141,8 @@ type serverHandlerTransport struct { stats stats.Handler } -func (ht *serverHandlerTransport) Close() error { +func (ht *serverHandlerTransport) Close() { ht.closeOnce.Do(ht.closeCloseChanOnce) - return nil } func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 48c5e52ed..24ca59084 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -24,6 +24,8 @@ import ( "io" "math" "net" + "net/http" + "path/filepath" "strconv" "strings" "sync" @@ -130,7 +132,7 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData onGoAway func(GoAwayReason) @@ -145,13 +147,20 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error address := addr.Addr networkType, ok := networktype.Get(addr) if fn != nil { + // Special handling for unix scheme with custom dialer. Back in the day, + // we did not have a unix resolver and therefore targets with a unix + // scheme would end up using the passthrough resolver. So, user's used a + // custom dialer in this case and expected the original dial target to + // be passed to the custom dialer. Now, we have a unix resolver. But if + // a custom dialer is specified, we want to retain the old behavior in + // terms of the address being passed to the custom dialer. if networkType == "unix" && !strings.HasPrefix(address, "\x00") { - // For backward compatibility, if the user dialed "unix:///path", - // the passthrough resolver would be used and the user's custom - // dialer would see "unix:///path". Since the unix resolver is used - // and the address is now "/path", prepend "unix://" so the user's - // custom dialer sees the same address. - return fn(ctx, "unix://"+address) + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path". + if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) } return fn(ctx, address) } @@ -192,6 +201,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }() + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. 
+ connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { @@ -236,12 +251,15 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - // gRPC, resolver, balancer etc. can specify arbitrary data in the - // Attributes field of resolver.Address, which is shoved into connectCtx - // and passed to the credential handshaker. This makes it possible for - // address specific arbitrary data to reach the credential handshaker. - connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + rawConn := conn + // Pull the deadline from the connectCtx, which will be used for + // timeouts in the authentication protocol handshake. Can ignore the + // boolean as the deadline will return the zero value, which will make + // the conn not timeout on I/O operations. + deadline, _ := connectCtx.Deadline() + rawConn.SetDeadline(deadline) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) + rawConn.SetDeadline(time.Time{}) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -333,8 +351,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } t.statsHandler.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -399,11 +418,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) } } - // If it's a connection error, let reader goroutine handle it - // since there might be data in the buffers. - if _, ok := err.(net.Error); !ok { - t.conn.Close() - } + // Do not close the transport. Let reader goroutine handle it since + // there might be data in the buffers. + t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() return t, nil @@ -571,7 +589,7 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s return nil, err } - return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) } for k, v := range data { // Capital header names are illegal in HTTP/2. @@ -608,26 +626,34 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } -// PerformedIOError wraps an error to indicate IO may have been performed -// before the error occurred. -type PerformedIOError struct { +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. 
It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. +type NewStreamError struct { Err error + + AllowTransparentRetry bool } -// Error implements error. -func (p PerformedIOError) Error() string { - return p.Err.Error() +func (e NewStreamError) Error() string { + return e.Err.Error() } // NewStream creates a stream and registers it into the transport as "active" -// streams. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +// streams. All non-nil errors returned will be *NewStreamError. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - // We may have performed I/O in the per-RPC creds callback, so do not - // allow transparent retry. - return nil, PerformedIOError{err} + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -727,23 +753,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return true }, hdr) if err != nil { - return nil, err + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, hdrListSizeErr + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { case <-ch: - case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, errStreamDrain + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, ErrConnClosing + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } if t.statsHandler != nil { @@ -872,18 +899,22 @@ func (t *http2Client) Close(err error) { t.controlBuf.finish() t.cancel() t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status if len(goAwayDebugMessage) > 0 { - err = fmt.Errorf("closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) } + // Notify all active streams. for _, s := range streams { - t.closeStream(s, err, false, http2.ErrCodeNo, status.New(codes.Unavailable, err.Error()), nil, false) + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } if t.statsHandler != nil { connEnd := &stats.ConnEnd{ @@ -1050,7 +1081,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { } // The server has closed the stream without sending trailers. Record that // the read direction is closed, and set the status appropriately. 
- if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) } } @@ -1221,7 +1252,11 @@ func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayTooManyPings } } - t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %v", f.ErrCode, string(f.DebugData())) + if len(f.DebugData()) == 0 { + t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) + } else { + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) + } } func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { @@ -1254,35 +1289,128 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - state := &decodeState{} - // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. - state.data.isGRPC = !initialHeader - if h2code, err := state.decodeHeader(frame); err != nil { - t.closeStream(s, err, true, h2code, status.Convert(err), nil, endStream) + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) return } - isHeader := false - defer func() { - if t.statsHandler != nil { - if isHeader { - inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - Header: s.header.Copy(), - Compression: s.recvCompress, - } - t.statsHandler.HandleRPC(s.ctx, inHeader) - } else { - inTrailer := &stats.InTrailer{ - Client: true, - WireLength: int(frame.Header().Length), - Trailer: s.trailer.Copy(), - } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. 
+ isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + statusGen *status.Status + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break + } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case "grpc-status-details-bin": + var err error + statusGen, err = decodeGRPCStatusDetails(hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) + } + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break } + mdata[hf.Name] = append(mdata[hf.Name], v) } - }() + } + + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. 
+ se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + isHeader := false // If headerChan hasn't been closed yet if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { @@ -1293,9 +1421,9 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. - s.recvCompress = state.data.encoding - if len(state.data.mdata) > 0 { - s.header = state.data.mdata + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata } } else { // HEADERS frame block carries a Trailers-Only. @@ -1304,13 +1432,36 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + if !endStream { return } + if statusGen == nil { + statusGen = status.New(rawStatusCode, grpcMessage) + } + // if client received END_STREAM from server while stream was still active, send RST_STREAM rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } // reader runs as a separate goroutine in charge of reading data from network @@ -1405,7 +1556,7 @@ func minTime(a, b time.Duration) time.Duration { return b } -// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 11be5599c..45d7bd145 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,7 +21,6 @@ package transport import ( "bytes" "context" - "errors" "fmt" "io" "math" @@ -36,6 +35,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -52,10 +52,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. 
- ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -73,7 +73,6 @@ type http2Server struct { writerDone chan struct{} // sync point to enable testing. remoteAddr net.Addr localAddr net.Addr - maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle framer *framer @@ -102,11 +101,11 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when drain(...) is called the first time. + // drainChan is initialized when Drain() is called the first time. // After which the server writes out the first GoAway(with ID 2^31-1) frame. // Then an independent goroutine will be launched to later send the second GoAway. // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is // already underway. drainChan chan struct{} state transportState @@ -118,16 +117,42 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen } -// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is -// returned if something goes wrong. -func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. +// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a nil transport and a non-nil error. For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. +func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. 
+ if err == credentials.ErrConnDispatched || err == io.EOF { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } writeBufSize := config.WriteBufferSize readBufSize := config.ReadBufferSize maxHeaderListSize := defaultServerMaxHeaderListSize @@ -206,18 +231,24 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } + done := make(chan struct{}) t := &http2Server{ - ctx: context.Background(), + ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, + authInfo: authInfo, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), @@ -249,12 +280,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err connBegin := &stats.ConnBegin{} t.stats.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { @@ -266,6 +297,14 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err // Check the validity of client preface. preface := make([]byte, len(clientPreface)) if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. + if err == io.EOF { + return nil, io.EOF + } return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } if !bytes.Equal(preface, clientPreface) { @@ -295,6 +334,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err } } t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() go t.keepalive() @@ -303,38 +343,145 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err // operateHeader takes action on the decoded headers. 
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + streamID := frame.Header().StreamID - state := &decodeState{ - serverSide: true, - } - if h2code, err := state.decodeHeader(frame); err != nil { - if _, ok := status.FromError(err); ok { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: h2code, - onWrite: func() {}, - }) - } + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) return false } + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + } + return true + } + t.maxStreamID = streamID + buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.data.encoding, - method: state.data.method, - contentSubtype: state.data.contentSubtype, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + } + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = false + mdata = make(map[string][]string) + httpMethod string + // headerError is set if an error is encountered while parsing the headers + headerError bool + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = true + } + // "Transports must consider requests containing the Connection header + // as malformed." - A41 + case "connection": + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") + } + headerError = true + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = true + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. 
+ if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if logger.V(logLevel) { + logger.Errorf("transport: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 400, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + return false + } + + if !isGRPC || headerError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return false + } + + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - if state.data.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } @@ -347,14 +494,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx = peer.NewContext(s.ctx, pr) // Attach the received metadata to the context. - if len(state.data.mdata) > 0 { - s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) - } - if state.data.statsTags != nil { - s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) - } - if state.data.statsTrace != nil { - s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) + if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) + } + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) + } } t.mu.Lock() if t.state != reachable { @@ -373,33 +520,25 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.cancel() return false } - if streamID%2 != 1 || streamID <= t.maxStreamID { - t.mu.Unlock() - // illegal gRPC stream id. 
- if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - s.cancel() - return true - } - t.maxStreamID = streamID - if state.data.httpMethod != http.MethodPost { + if httpMethod != http.MethodPost { t.mu.Unlock() + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) if logger.V(logLevel) { - logger.Warningf("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", state.data.httpMethod) + logger.Infof("transport: %v", errMsg) } - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) s.cancel() return false } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: state.data.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { t.mu.Unlock() if logger.V(logLevel) { logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) @@ -409,9 +548,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( stat = status.New(codes.PermissionDenied, err.Error()) } t.controlBuf.put(&earlyAbortStream{ + httpStatus: 200, streamID: s.id, contentSubtype: s.contentSubtype, status: stat, + rst: !frame.StreamEnded(), }) return false } @@ -437,7 +578,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), - Header: metadata.MD(state.data.mdata).Copy(), + Header: metadata.MD(mdata).Copy(), } t.stats.HandleRPC(s.ctx, inHeader) } @@ -649,7 +790,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { s.write(recvMsg{buffer: buffer}) } } - if f.Header().Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { // Received the end of stream from the client. s.compareAndSwapState(streamActive, streamReadDone) s.write(recvMsg{err: io.EOF}) @@ -793,11 +934,25 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } + + if s.getState() == streamDone { + return t.streamContextErr(s) + } + s.hdrMu.Lock() if md.Len() > 0 { if s.header.Len() > 0 { @@ -808,7 +963,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } if err := t.writeHeaderLocked(s); err != nil { s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } s.hdrMu.Unlock() return nil @@ -924,23 +1079,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. 
if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? - s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -950,12 +1094,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -1004,12 +1143,12 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain() return } idleTimer.Reset(val) case <-ageTimer.C: - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain() ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: @@ -1063,11 +1202,11 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() error { +func (t *http2Server) Close() { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return errors.New("transport: Close() was already called") + return } t.state = closing streams := t.activeStreams @@ -1075,10 +1214,10 @@ func (t *http2Server) Close() error { t.mu.Unlock() t.controlBuf.finish() close(t.done) - err := t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) + if err := t.conn.Close(); err != nil && logger.V(logLevel) { + logger.Infof("transport: error closing conn during Close: %v", err) } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() @@ -1087,15 +1226,10 @@ func (t *http2Server) Close() error { connEnd := &stats.ConnEnd{} t.stats.HandleConn(t.ctx, connEnd) } - return err } // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1117,6 +1251,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. 
@@ -1136,6 +1275,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) @@ -1152,17 +1296,13 @@ func (t *http2Server) RemoteAddr() net.Addr { } func (t *http2Server) Drain() { - t.drain(http2.ErrCodeNo, []byte{}) -} - -func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { t.mu.Lock() defer t.mu.Unlock() if t.drainChan != nil { return } t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1170,20 +1310,23 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} // Handles outgoing GoAway and returns true if loopy needs to put itself // in draining mode. func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() t.mu.Lock() if t.state == closing { // TODO(mmukhi): This seems unnecessary. t.mu.Unlock() + t.maxStreamMu.Unlock() // The transport is closing. return false, ErrConnClosing } - sid := t.maxStreamID if !g.headsUp { // Stop accepting more streams now. t.state = draining + sid := t.maxStreamID if len(t.activeStreams) == 0 { g.closeConn = true } t.mu.Unlock() + t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } @@ -1196,6 +1339,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return true, nil } t.mu.Unlock() + t.maxStreamMu.Unlock() // For a graceful close, send out a GoAway with stream ID of MaxUInt32, // Follow that with a ping and wait for the ack to come back or a timer // to expire. During this time accept new streams since they might have @@ -1280,3 +1424,18 @@ func getJitter(v time.Duration) time.Duration { j := grpcrand.Int63n(2*r) - r return time.Duration(j) } + +type connectionKey struct{} + +// GetConnection gets the connection from the context. +func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. 
+func setConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index c7dee140c..d8247bcdf 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -39,7 +39,6 @@ import ( spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -96,53 +95,6 @@ var ( logger = grpclog.Component("transport") ) -type parsedHeaderData struct { - encoding string - // statusGen caches the stream status received from the trailer the server - // sent. Client side only. Do not access directly. After all trailers are - // parsed, use the status method to retrieve the status. - statusGen *status.Status - // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not - // intended for direct access outside of parsing. - rawStatusCode *int - rawStatusMsg string - httpStatus *int - // Server side only fields. - timeoutSet bool - timeout time.Duration - method string - httpMethod string - // key-value metadata map from the peer. - mdata map[string][]string - statsTags []byte - statsTrace []byte - contentSubtype string - - // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). - // - // We are in gRPC mode (peer speaking gRPC) if: - // * We are client side and have already received a HEADER frame that indicates gRPC peer. - // * The header contains valid a content-type, i.e. a string starts with "application/grpc" - // And we should handle error specific to gRPC. - // - // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we - // are in HTTP fallback mode, and should handle error specific to HTTP. - isGRPC bool - grpcErr error - httpErr error - contentTypeErr string -} - -// decodeState configures decoding criteria and records the decoded data. -type decodeState struct { - // whether decoding on server side or not - serverSide bool - - // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS - // frame once decodeHeader function has been invoked and returned. - data parsedHeaderData -} - // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -180,14 +132,6 @@ func isWhitelistedHeader(hdr string) bool { } } -func (d *decodeState) status() *status.Status { - if d.data.statusGen == nil { - // No status-details were provided; generate status using code/msg. - d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) - } - return d.data.statusGen -} - const binHdrSuffix = "-bin" func encodeBinHeader(v []byte) string { @@ -217,168 +161,16 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode, error) { - // frame.Truncated is set to true when framer detects that the current header - // list size hits MaxHeaderListSize limit. 
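The GetConnection/setConnection helpers added above stash the raw net.Conn in the RPC context. The sketch below is hypothetical: internal/transport cannot be imported from outside the google.golang.org/grpc module, so this only shows how same-module code might read the connection back.

package transportsketch // hypothetical; real callers live inside the grpc module

import (
	"context"
	"log"

	"google.golang.org/grpc/internal/transport"
)

// logPeerConn reads back the net.Conn stored by setConnection.
// GetConnection returns nil when no connection was attached to the context.
func logPeerConn(ctx context.Context) {
	if conn := transport.GetConnection(ctx); conn != nil {
		log.Printf("rpc over %v -> %v", conn.RemoteAddr(), conn.LocalAddr())
	}
}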
- if frame.Truncated { - return http2.ErrCodeFrameSize, status.Error(codes.Internal, "peer header list size exceeded limit") - } - - for _, hf := range frame.Fields { - d.processHeaderField(hf) - } - - if d.data.isGRPC { - if d.data.grpcErr != nil { - return http2.ErrCodeProtocol, d.data.grpcErr - } - if d.serverSide { - return http2.ErrCodeNo, nil - } - if d.data.rawStatusCode == nil && d.data.statusGen == nil { - // gRPC status doesn't exist. - // Set rawStatusCode to be unknown and return nil error. - // So that, if the stream has ended this Unknown status - // will be propagated to the user. - // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propagated. - code := int(codes.Unknown) - d.data.rawStatusCode = &code - } - return http2.ErrCodeNo, nil - } - - // HTTP fallback mode - if d.data.httpErr != nil { - return http2.ErrCodeProtocol, d.data.httpErr - } - - var ( - code = codes.Internal // when header does not include HTTP status, return INTERNAL - ok bool - ) - - if d.data.httpStatus != nil { - code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] - if !ok { - code = codes.Unknown - } - } - - return http2.ErrCodeProtocol, status.Error(code, d.constructHTTPErrMsg()) -} - -// constructErrMsg constructs error message to be returned in HTTP fallback mode. -// Format: HTTP status code and its corresponding message + content-type error message. -func (d *decodeState) constructHTTPErrMsg() string { - var errMsgs []string - - if d.data.httpStatus == nil { - errMsgs = append(errMsgs, "malformed header: missing HTTP status") - } else { - errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) - } - - if d.data.contentTypeErr == "" { - errMsgs = append(errMsgs, "transport: missing content-type field") - } else { - errMsgs = append(errMsgs, d.data.contentTypeErr) - } - - return strings.Join(errMsgs, "; ") -} - -func (d *decodeState) addMetadata(k, v string) { - if d.data.mdata == nil { - d.data.mdata = make(map[string][]string) +func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { + v, err := decodeBinHeader(rawDetails) + if err != nil { + return nil, err } - d.data.mdata[k] = append(d.data.mdata[k], v) -} - -func (d *decodeState) processHeaderField(f hpack.HeaderField) { - switch f.Name { - case "content-type": - contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value) - if !validContentType { - d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) - return - } - d.data.contentSubtype = contentSubtype - // TODO: do we want to propagate the whole content-type in the metadata, - // or come up with a way to just propagate the content-subtype if it was set? - // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} - // in the metadata? 
- d.addMetadata(f.Name, f.Value) - d.data.isGRPC = true - case "grpc-encoding": - d.data.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return - } - d.data.rawStatusCode = &code - case "grpc-message": - d.data.rawStatusMsg = decodeGrpcMessage(f.Value) - case "grpc-status-details-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - s := &spb.Status{} - if err := proto.Unmarshal(v, s); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - d.data.statusGen = status.FromProto(s) - case "grpc-timeout": - d.data.timeoutSet = true - var err error - if d.data.timeout, err = decodeTimeout(f.Value); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) - } - case ":path": - d.data.method = f.Value - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) - return - } - d.data.httpStatus = &code - case "grpc-tags-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) - return - } - d.data.statsTags = v - d.addMetadata(f.Name, string(v)) - case "grpc-trace-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) - return - } - d.data.statsTrace = v - d.addMetadata(f.Name, string(v)) - case ":method": - d.data.httpMethod = f.Value - default: - if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { - break - } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err != nil { - if logger.V(logLevel) { - logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) - } - return - } - d.addMetadata(f.Name, v) + st := &spb.Status{} + if err = proto.Unmarshal(v, st); err != nil { + return nil, err } + return status.FromProto(st), nil } type timeoutUnit uint8 diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go index 96967428b..c11b52782 100644 --- a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -17,7 +17,7 @@ */ // Package networktype declares the network type to be used in the default -// dailer. Attribute of a resolver.Address. +// dialer. Attribute of a resolver.Address. package networktype import ( @@ -31,7 +31,7 @@ const key = keyType("grpc.internal.transport.networktype") // Set returns a copy of the provided address with attributes containing networkType. 
func Set(address resolver.Address, networkType string) resolver.Address { - address.Attributes = address.Attributes.WithValues(key, networkType) + address.Attributes = address.Attributes.WithValue(key, networkType) return address } diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index a662bf39a..415961987 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -37,7 +37,7 @@ var ( httpProxyFromEnvironment = http.ProxyFromEnvironment ) -func mapAddress(ctx context.Context, address string) (*url.URL, error) { +func mapAddress(address string) (*url.URL, error) { req := &http.Request{ URL: &url.URL{ Scheme: "https", @@ -114,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // connection. func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { newAddr := addr - proxyURL, err := mapAddress(ctx, addr) + proxyURL, err := mapAddress(addr) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 6cc1031fd..a9ce717f1 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -30,9 +30,11 @@ import ( "net" "sync" "sync/atomic" + "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -518,7 +520,8 @@ const ( // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { MaxStreams uint32 - AuthInfo credentials.AuthInfo + ConnectionTimeout time.Duration + Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters @@ -527,17 +530,11 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } -// NewServerTransport creates a ServerTransport with conn or non-nil error -// if it fails. -func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { - return newHTTP2Server(conn, config) -} - // ConnectOptions covers all relevant options for communicating with the server. type ConnectOptions struct { // UserAgent is the application user agent. @@ -567,7 +564,7 @@ type ConnectOptions struct { // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. @@ -694,7 +691,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. 
- Close() error + Close() // RemoteAddr returns the remote network address. RemoteAddr() net.Addr @@ -745,6 +742,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go index 3677c3f04..e8b492774 100644 --- a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go +++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go @@ -28,7 +28,7 @@ type handshakeClusterNameKey struct{} // SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field // is updated with the cluster name. func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(handshakeClusterNameKey{}, clusterName) + addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) return addr } diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index e4cbea917..8e0f6abe8 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -93,12 +93,16 @@ func (md MD) Copy() MD { } // Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. func (md MD) Get(k string) []string { k = strings.ToLower(k) return md[k] } // Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. func (md MD) Set(k string, vals ...string) { if len(vals) == 0 { return @@ -107,7 +111,10 @@ func (md MD) Set(k string, vals ...string) { md[k] = vals } -// Append adds the values to key k, not overwriting what was already stored at that key. +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. func (md MD) Append(k string, vals ...string) { if len(vals) == 0 { return @@ -116,9 +123,17 @@ func (md MD) Append(k string, vals ...string) { md[k] = append(md[k], vals...) } +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + // Join joins any number of mds into a single MD. -// The order of values for each key is determined by the order in which -// the mds containing those values are presented to Join. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -145,8 +160,8 @@ func NewOutgoingContext(ctx context.Context, md MD) context.Context { } // AppendToOutgoingContext returns a new context with the provided kv merged -// with any existing metadata in the context. Please refer to the -// documentation of Pairs for a description of kv. +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. 
func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) @@ -159,20 +174,36 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } -// FromIncomingContext returns the incoming metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. -func FromIncomingContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdIncomingKey{}).(MD) - return +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := MD{} + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + s := make([]string, len(v)) + copy(s, v) + out[key] = s + } + return out, true } -// FromOutgoingContextRaw returns the un-merged, intermediary contents -// of rawMD. Remember to perform strings.ToLower on the keys. The returned -// MD should not be modified. Writing to it may cause races. Modification -// should be made to copies of the returned MD. +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). // -// This is intended for gRPC-internal use ONLY. +// This is intended for gRPC-internal use ONLY. Users should use +// FromOutgoingContext instead. func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { @@ -182,16 +213,25 @@ func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { return raw.md, raw.added, true } -// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. func FromOutgoingContext(ctx context.Context) (MD, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, false } - out := raw.md.Copy() + out := MD{} + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. 
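The metadata changes above add MD.Delete and guarantee that FromIncomingContext/FromOutgoingContext return lowercase keys even for hand-built MD maps. A minimal usage sketch against the public metadata package (keys and values are made up for illustration):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("X-Request-ID", "abc123") // Pairs lowercases the key
	md.Append("x-request-id", "def456")
	md.Delete("X-Request-Id") // the new Delete also lowercases before removing
	fmt.Println(md)           // map[]

	// MD built by hand may carry mixed-case keys, but FromIncomingContext now
	// normalizes everything to lowercase on the way out.
	ctx := metadata.NewIncomingContext(context.Background(), metadata.MD{"Mixed-Case": {"v"}})
	out, _ := metadata.FromIncomingContext(ctx)
	fmt.Println(out) // map[mixed-case:[v]]
}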
+ key := strings.ToLower(k) + s := make([]string, len(v)) + copy(s, v) + out[key] = s + } for _, added := range raw.added { if len(added)%2 == 1 { panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index a58174b6f..843633c91 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -131,7 +131,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } if _, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -144,10 +144,10 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { - logger.Error("subconn returned from pick is not *acBalancerWrapper") + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if t := acw.getAddrConn().getReadyTransport(); t != nil { if channelz.IsOn() { return t, doneChannelzWrapper(acw, pickResult.Done), nil } @@ -175,3 +175,9 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index b858c2a5e..fb7a99e0a 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -44,77 +44,107 @@ func (*pickfirstBuilder) Name() string { } type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. 
+ b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + return nil + } + + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) - b.sc.Connect() + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + b.state = state.ConnectivityState + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { - case connectivity.Ready, connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + switch state.ConnectivityState { + case connectivity.Ready: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + case connectivity.Idle: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: s.ConnectivityState, - Picker: &picker{err: 
s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } } @@ -122,15 +152,32 @@ func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.S func (b *pickfirstBalancer) Close() { } +func (b *pickfirstBalancer) ExitIdle() { + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() + } +} + type picker struct { result balancer.PickResult err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + subConn balancer.SubConn +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + func init() { balancer.Register(newPickfirstBuilder()) } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index 7d05c14eb..4e6a6b1a8 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: reflection/grpc_reflection_v1alpha/reflection.proto diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index 82a5ba7f2..81344abd7 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -37,21 +37,17 @@ To register server reflection on a gRPC server: package reflection // import "google.golang.org/grpc/reflection" import ( - "bytes" - "compress/gzip" - "fmt" "io" - "io/ioutil" - "reflect" "sort" - "sync" - "github.com/golang/protobuf/proto" - dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" "google.golang.org/grpc" "google.golang.org/grpc/codes" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) // GRPCServer is the interface provided by a gRPC server. It is implemented by @@ -59,339 +55,174 @@ import ( // as a registry, for accumulating the services exposed by the server. type GRPCServer interface { grpc.ServiceRegistrar - GetServiceInfo() map[string]grpc.ServiceInfo + ServiceInfoProvider } var _ GRPCServer = (*grpc.Server)(nil) -type serverReflectionServer struct { - rpb.UnimplementedServerReflectionServer - s GRPCServer - - initSymbols sync.Once - serviceNames []string - symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files -} - // Register registers the server reflection service on the given gRPC server. func Register(s GRPCServer) { - rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ - s: s, - }) + svr := NewServer(ServerOptions{Services: s}) + rpb.RegisterServerReflectionServer(s, svr) } -// protoMessage is used for type assertion on proto messages. 
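The pickfirst changes above introduce an idlePicker whose first Pick wakes the idle SubConn. A hedged sketch of that pattern against the public balancer API; the type name here is illustrative and not part of the patch.

package pickersketch // hypothetical package, for illustration only

import "google.golang.org/grpc/balancer"

// idleKicker mirrors the idlePicker pattern: the first Pick kicks the idle
// SubConn into CONNECTING and makes gRPC queue the RPC until a new picker is
// published.
type idleKicker struct {
	subConn balancer.SubConn
}

var _ balancer.Picker = (*idleKicker)(nil)

func (p *idleKicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	p.subConn.Connect()
	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}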
-// Generated proto message implements function Descriptor(), but Descriptor() -// is not part of interface proto.Message. This interface is needed to -// call Descriptor(). -type protoMessage interface { - Descriptor() ([]byte, []int) -} - -func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) { - s.initSymbols.Do(func() { - serviceInfo := s.s.GetServiceInfo() - - s.symbols = map[string]*dpb.FileDescriptorProto{} - s.serviceNames = make([]string, 0, len(serviceInfo)) - processed := map[string]struct{}{} - for svc, info := range serviceInfo { - s.serviceNames = append(s.serviceNames, svc) - fdenc, ok := parseMetadata(info.Metadata) - if !ok { - continue - } - fd, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fd, processed) - } - sort.Strings(s.serviceNames) - }) - - return s.serviceNames, s.symbols -} - -func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) { - filename := fd.GetName() - if _, ok := processed[filename]; ok { - return - } - processed[filename] = struct{}{} - - prefix := fd.GetPackage() - - for _, msg := range fd.MessageType { - s.processMessage(fd, prefix, msg) - } - for _, en := range fd.EnumType { - s.processEnum(fd, prefix, en) - } - for _, ext := range fd.Extension { - s.processField(fd, prefix, ext) - } - for _, svc := range fd.Service { - svcName := fqn(prefix, svc.GetName()) - s.symbols[svcName] = fd - for _, meth := range svc.Method { - name := fqn(svcName, meth.GetName()) - s.symbols[name] = fd - } - } - - for _, dep := range fd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fdDep, processed) - } -} - -func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) { - msgName := fqn(prefix, msg.GetName()) - s.symbols[msgName] = fd - - for _, nested := range msg.NestedType { - s.processMessage(fd, msgName, nested) - } - for _, en := range msg.EnumType { - s.processEnum(fd, msgName, en) - } - for _, ext := range msg.Extension { - s.processField(fd, msgName, ext) - } - for _, fld := range msg.Field { - s.processField(fd, msgName, fld) - } - for _, oneof := range msg.OneofDecl { - oneofName := fqn(msgName, oneof.GetName()) - s.symbols[oneofName] = fd - } -} - -func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) { - enName := fqn(prefix, en.GetName()) - s.symbols[enName] = fd - - for _, val := range en.Value { - valName := fqn(enName, val.GetName()) - s.symbols[valName] = fd - } -} - -func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) { - fldName := fqn(prefix, fld.GetName()) - s.symbols[fldName] = fd -} - -func fqn(prefix, name string) string { - if prefix == "" { - return name - } - return prefix + "." + name -} - -// fileDescForType gets the file descriptor for the given type. -// The given type should be a proto message. -func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - enc, _ := m.Descriptor() - - return decodeFileDesc(enc) -} - -// decodeFileDesc does decompression and unmarshalling on the given -// file descriptor byte slice. 
-func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { - raw, err := decompress(enc) - if err != nil { - return nil, fmt.Errorf("failed to decompress enc: %v", err) - } - - fd := new(dpb.FileDescriptorProto) - if err := proto.Unmarshal(raw, fd); err != nil { - return nil, fmt.Errorf("bad descriptor: %v", err) - } - return fd, nil +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +// +// The reflection service is only interested in the service names, but the +// signature is this way so that *grpc.Server implements it. So it is okay +// for a custom implementation to return zero values for the +// grpc.ServiceInfo values in the map. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo } -// decompress does gzip decompression. -func decompress(b []byte) ([]byte, error) { - r, err := gzip.NewReader(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) - } - out, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) - } - return out, nil +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) } -func typeForName(name string) (reflect.Type, error) { - pt := proto.MessageType(name) - if pt == nil { - return nil, fmt.Errorf("unknown type: %q", name) - } - st := pt.Elem() - - return st, nil +// ServerOptions represents the options used to construct a reflection server. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerOptions struct { + // The source of advertised RPC services. If not specified, the reflection + // server will report an empty list when asked to list services. + // + // This value will typically be a *grpc.Server. But the set of advertised + // services can be customized by wrapping a *grpc.Server or using an + // alternate implementation that returns a custom set of service names. + Services ServiceInfoProvider + // Optional resolver used to load descriptors. If not specified, + // protoregistry.GlobalFiles will be used. + DescriptorResolver protodesc.Resolver + // Optional resolver used to query for known extensions. If not specified, + // protoregistry.GlobalTypes will be used. + ExtensionResolver ExtensionResolver } -func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) +// NewServer returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func NewServer(opts ServerOptions) rpb.ServerReflectionServer { + if opts.DescriptorResolver == nil { + opts.DescriptorResolver = protoregistry.GlobalFiles + } + if opts.ExtensionResolver == nil { + opts.ExtensionResolver = protoregistry.GlobalTypes + } + return &serverReflectionServer{ + s: opts.Services, + descResolver: opts.DescriptorResolver, + extResolver: opts.ExtensionResolver, } - - var extDesc *proto.ExtensionDesc - for id, desc := range proto.RegisteredExtensions(m) { - if id == ext { - extDesc = desc - break - } - } - - if extDesc == nil { - return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) - } - - return decodeFileDesc(proto.FileDescriptor(extDesc.Filename)) } -func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - - exts := proto.RegisteredExtensions(m) - out := make([]int32, 0, len(exts)) - for id := range exts { - out = append(out, id) - } - return out, nil +type serverReflectionServer struct { + rpb.UnimplementedServerReflectionServer + s ServiceInfoProvider + descResolver protodesc.Resolver + extResolver ExtensionResolver } // fileDescWithDependencies returns a slice of serialized fileDescriptors in // wire format ([]byte). The fileDescriptors will include fd and all the // transitive dependencies of fd with names not in sentFileDescriptors. -func fileDescWithDependencies(fd *dpb.FileDescriptorProto, sentFileDescriptors map[string]bool) ([][]byte, error) { - r := [][]byte{} - queue := []*dpb.FileDescriptorProto{fd} +func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + var r [][]byte + queue := []protoreflect.FileDescriptor{fd} for len(queue) > 0 { currentfd := queue[0] queue = queue[1:] - if sent := sentFileDescriptors[currentfd.GetName()]; len(r) == 0 || !sent { - sentFileDescriptors[currentfd.GetName()] = true - currentfdEncoded, err := proto.Marshal(currentfd) + if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { + sentFileDescriptors[currentfd.Path()] = true + fdProto := protodesc.ToFileDescriptorProto(currentfd) + currentfdEncoded, err := proto.Marshal(fdProto) if err != nil { return nil, err } r = append(r, currentfdEncoded) } - for _, dep := range currentfd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - queue = append(queue, fdDep) + for i := 0; i < currentfd.Imports().Len(); i++ { + queue = append(queue, currentfd.Imports().Get(i)) } } return r, nil } -// fileDescEncodingByFilename finds the file descriptor for given filename, -// finds all of its previously unsent transitive dependencies, does marshalling -// on them, and returns the marshalled result. -func (s *serverReflectionServer) fileDescEncodingByFilename(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - enc := proto.FileDescriptor(name) - if enc == nil { - return nil, fmt.Errorf("unknown file: %v", name) - } - fd, err := decodeFileDesc(enc) - if err != nil { - return nil, err - } - return fileDescWithDependencies(fd, sentFileDescriptors) -} - -// parseMetadata finds the file descriptor bytes specified meta. -// For SupportPackageIsVersion4, m is the name of the proto file, we -// call proto.FileDescriptor to get the byte slice. 
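As a usage illustration of the new ServerOptions/NewServer surface above, the sketch below registers a reflection service by hand; it mirrors what Register does and assumes a plain *grpc.Server with default resolvers.

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

func main() {
	s := grpc.NewServer()
	// Equivalent to reflection.Register(s), spelled out through ServerOptions
	// so a custom ServiceInfoProvider or descriptor/extension resolvers could
	// be supplied instead of the defaults.
	svc := reflection.NewServer(reflection.ServerOptions{Services: s})
	rpb.RegisterServerReflectionServer(s, svc)
	// s.Serve(lis) omitted from this sketch.
}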
-// For SupportPackageIsVersion3, m is a byte slice itself. -func parseMetadata(meta interface{}) ([]byte, bool) { - // Check if meta is the file name. - if fileNameForMeta, ok := meta.(string); ok { - return proto.FileDescriptor(fileNameForMeta), true - } - - // Check if meta is the byte slice. - if enc, ok := meta.([]byte); ok { - return enc, true - } - - return nil, false -} - // fileDescEncodingContainingSymbol finds the file descriptor containing the // given symbol, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. The given symbol // can be a type, a service or a method. func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - _, symbols := s.getSymbols() - fd := symbols[name] - if fd == nil { - // Check if it's a type name that was not present in the - // transitive dependencies of the registered services. - if st, err := typeForName(name); err == nil { - fd, err = s.fileDescForType(st) - if err != nil { - return nil, err - } - } - } - - if fd == nil { - return nil, fmt.Errorf("unknown symbol: %v", name) + d, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)) + if err != nil { + return nil, err } - - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(d.ParentFile(), sentFileDescriptors) } // fileDescEncodingContainingExtension finds the file descriptor containing // given extension, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { - st, err := typeForName(typeName) - if err != nil { - return nil, err - } - fd, err := fileDescContainingExtension(st, extNum) + xt, err := s.extResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) if err != nil { return nil, err } - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) } // allExtensionNumbersForTypeName returns all extension numbers for the given type. func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { - st, err := typeForName(name) - if err != nil { - return nil, err + var numbers []int32 + s.extResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { + numbers = append(numbers, int32(xt.TypeDescriptor().Number())) + return true + }) + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] < numbers[j] + }) + if len(numbers) == 0 { + // maybe return an error if given type name is not known + if _, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { + return nil, err + } } - extNums, err := s.allExtensionNumbersForType(st) - if err != nil { - return nil, err + return numbers, nil +} + +// listServices returns the names of services this server exposes. 
+func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { + serviceInfo := s.s.GetServiceInfo() + resp := make([]*rpb.ServiceResponse, 0, len(serviceInfo)) + for svc := range serviceInfo { + resp = append(resp, &rpb.ServiceResponse{Name: svc}) } - return extNums, nil + sort.Slice(resp, func(i, j int) bool { + return resp[i].Name < resp[j].Name + }) + return resp } // ServerReflectionInfo is the reflection service handler. @@ -412,7 +243,11 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio } switch req := in.MessageRequest.(type) { case *rpb.ServerReflectionRequest_FileByFilename: - b, err := s.fileDescEncodingByFilename(req.FileByFilename, sentFileDescriptors) + var b [][]byte + fd, err := s.descResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) + } if err != nil { out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ ErrorResponse: &rpb.ErrorResponse{ @@ -473,16 +308,9 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio } } case *rpb.ServerReflectionRequest_ListServices: - svcNames, _ := s.getSymbols() - serviceResponses := make([]*rpb.ServiceResponse, len(svcNames)) - for i, n := range svcNames { - serviceResponses[i] = &rpb.ServiceResponse{ - Name: n, - } - } out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ ListServicesResponse: &rpb.ListServiceResponse{ - Service: serviceResponses, + Service: s.listServices(), }, } default: diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index dfd3226a1..978b89f37 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} echo "remove existing generated files" -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -76,7 +76,21 @@ SOURCES=( # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an # import path of 'bar' in the generated code when 'foo.proto' is imported in # one of the sources. -OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). 
+OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ +Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing for src in ${SOURCES[@]}; do echo "protoc ${src}" @@ -85,7 +99,6 @@ for src in ${SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done @@ -96,18 +109,17 @@ for src in ${LEGACY_SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done # The go_package option in grpc/lookup/v1/rls.proto doesn't match the # current location. Move it into the right place. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go # grpc/service_config/service_config.proto does not have a go_package option. mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go new file mode 100644 index 000000000..e87ecd0ee --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { + addr Address + value interface{} +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes. BalancerAttributes are ignored, as are Metadata and Type. 
+// Multiple accesses may not be performed concurrently. Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { + m map[string]addressMapEntryList +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[string]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + if len(l) == 0 { + return -1 + } + for i, entry := range l { + if entry.addr.ServerName == addr.ServerName && + entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. +func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { + entryList := a.m[addr.Addr] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value interface{}) { + entryList := a.m[addr.Addr] + if entry := entryList.find(addr); entry != -1 { + a.m[addr.Addr][entry].value = value + return + } + a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. +func (a *AddressMap) Delete(addr Address) { + entryList := a.m[addr.Addr] + entry := entryList.find(addr) + if entry == -1 { + return + } + if len(entryList) == 1 { + entryList = nil + } else { + copy(entryList[entry:], entryList[entry+1:]) + entryList = entryList[:len(entryList)-1] + } + a.m[addr.Addr] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { + ret := 0 + for _, entryList := range a.m { + ret += len(entryList) + } + return ret +} + +// Keys returns a slice of all current map keys. +func (a *AddressMap) Keys() []Address { + ret := make([]Address, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.addr) + } + } + return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 6a9d234a5..ca2e35a35 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -23,9 +23,11 @@ package resolver import ( "context" "net" + "net/url" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -116,9 +118,14 @@ type Address struct { ServerName string // Attributes contains arbitrary data about this address intended for - // consumption by the load balancing policy. + // consumption by the SubConn. Attributes *attributes.Attributes + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attribes do not affect SubConn + // creation, connection establishment, handshaking, etc. + BalancerAttributes *attributes.Attributes + // Type is the type of this address. // // Deprecated: use Attributes instead. @@ -131,6 +138,20 @@ type Address struct { Metadata interface{} } +// Equal returns whether a and o are identical. Metadata is compared directly, +// not with any recursive introspection. 
+func (a Address) Equal(o Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && + a.Type == o.Type && a.Metadata == o.Metadata +} + +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + return pretty.ToJSON(a) +} + // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { @@ -204,25 +225,36 @@ type ClientConn interface { // Target represents a target for gRPC, as specified in: // https://github.com/grpc/grpc/blob/master/doc/naming.md. -// It is parsed from the target string that gets passed into Dial or DialContext by the user. And -// grpc passes it to the resolver and the balancer. +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user. And gRPC passes it to the resolver and the balancer. // -// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will -// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed -// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. // -// If the target does not contain a scheme, we will apply the default scheme, and set the Target to -// be the full target string. e.g. "foo.bar" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// Examples: // -// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the -// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target -// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - Scheme string + // Deprecated: use URL.Scheme instead. + Scheme string + // Deprecated: use URL.Host instead. Authority string - Endpoint string + // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when + // the former is empty. + Endpoint string + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial + // target can be accessed from here. + URL url.URL } // Builder creates a resolver that will be used to watch name resolution updates. 
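As a rough usage sketch (illustrative only, not part of the patch) of the AddressMap API introduced in resolver/map.go above: entries are keyed on Addr and disambiguated by ServerName and Attributes, while BalancerAttributes, Type and Metadata are ignored, and the map is not safe for concurrent use. The addresses and values below are made up.

    package main

    import (
        "fmt"

        "google.golang.org/grpc/resolver"
    )

    func main() {
        m := resolver.NewAddressMap()

        // Same Addr, different ServerName: these land under the same map key
        // but remain distinct entries, per addressMapEntryList.find above.
        a := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend-a"}
        b := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend-b"}

        m.Set(a, "value-a")
        m.Set(b, "value-b")

        if v, ok := m.Get(a); ok {
            fmt.Println(v) // value-a
        }
        fmt.Println(m.Len()) // 2

        m.Delete(a)
        fmt.Println(len(m.Keys())) // 1
    }
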
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 4118de571..05a9d4e0b 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,7 +19,6 @@ package grpc import ( - "fmt" "strings" "sync" @@ -27,6 +26,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -39,6 +39,8 @@ type ccResolverWrapper struct { resolver resolver.Resolver done *grpcsync.Event curState resolver.State + + incomingMu sync.Mutex // Synchronizes all the incoming calls. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and @@ -90,13 +92,12 @@ func (ccr *ccResolverWrapper) close() { } func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return nil } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } + ccr.addChannelzTraceEvent(s) ccr.curState = s if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { return balancer.ErrBadResolverState @@ -105,6 +106,8 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { } func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } @@ -114,13 +117,12 @@ func (ccr *ccResolverWrapper) ReportError(err error) { // NewAddress is called by the resolver implementation to send addresses to gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) ccr.curState.Addresses = addrs ccr.cc.updateResolverState(ccr.curState, nil) } @@ -128,10 +130,12 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. 
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) if ccr.cc.dopts.disableServiceConfig { channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return @@ -141,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) return } - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) ccr.curState.ServiceConfig = scpr ccr.cc.updateResolverState(ccr.curState, nil) } @@ -170,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 6db356fa5..5d407b004 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -258,7 +258,8 @@ func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { } // WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false, the RPC will fail +// connections or unreachable servers. If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will // retry the call if it fails due to a transient error. gRPC will not retry if @@ -711,13 +712,11 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } - } else { - size = len(d) - } - if size > maxReceiveMessageSize { - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } } return d, nil } @@ -828,26 +827,28 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { // toRPCErr converts an error into an error from the status package. 
func toRPCErr(err error) error { - if err == nil || err == io.EOF { + switch err { + case nil, io.EOF: return err - } - if err == io.ErrUnexpectedEOF { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } - if _, ok := status.FromError(err); ok { - return err - } + switch e := err.(type) { case transport.ConnectionError: return status.Error(codes.Unavailable, e.Desc) - default: - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } + case *transport.NewStreamError: + return toRPCErr(e.Err) } + + if _, ok := status.FromError(err); ok { + return err + } + return status.Error(codes.Unknown, err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 0a151dee4..65de84b30 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -134,7 +134,7 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData serverWorkerChannels []chan *serverWorkerData @@ -584,9 +584,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } @@ -710,16 +709,9 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo { // the server being stopped. 
var ErrServerStopped = errors.New("grpc: the server has been stopped") -func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if s.opts.creds == nil { - return rawConn, nil, nil - } - return s.opts.creds.ServerHandshake(rawConn) -} - type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -731,9 +723,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -766,11 +757,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -780,8 +766,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -839,28 +833,14 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) - conn, authInfo, err := s.useTransportAuthenticator(rawConn) - if err != nil { - // ErrConnDispatched means that the connection was dispatched away from - // gRPC; those connections should be left open. - if err != credentials.ErrConnDispatched { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - rawConn.Close() - } - rawConn.SetDeadline(time.Time{}) - return - } // Finish handshaking (HTTP2) - st := s.newHTTP2Transport(conn, authInfo) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) if st == nil { return } - rawConn.SetDeadline(time.Time{}) if !s.addConn(lisAddr, st) { return } @@ -881,10 +861,11 @@ func (s *Server) drainServerTransports(addr string) { // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). 
-func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { config := &transport.ServerConfig{ MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, StatsHandler: s.opts.statsHandler, KeepaliveParams: s.opts.keepaliveParams, @@ -897,13 +878,20 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, } - st, err := transport.NewServerTransport("http2", c, config) + st, err := transport.NewServerTransport(c, config) if err != nil { s.mu.Lock() s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() - c.Close() - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. + if err != io.EOF { + channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + c.Close() + } return nil } @@ -1109,22 +1097,29 @@ func chainUnaryServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) - } + chainedInt = chainUnaryInterceptors(interceptors) } s.opts.unaryInt = chainedInt } -// getChainUnaryHandler recursively generate the chained UnaryHandler -func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - - return func(ctx context.Context, req interface{}) (interface{}, error) { - return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. + var state struct { + i int + next UnaryHandler + } + state.next = func(ctx context.Context, req interface{}) (interface{}, error) { + if state.i == len(interceptors)-1 { + return interceptors[state.i](ctx, req, info, handler) + } + state.i++ + return interceptors[state.i-1](ctx, req, info, state.next) + } + return state.next(ctx, req) } } @@ -1138,7 +1133,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if sh != nil { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, } sh.HandleRPC(stream.Context(), statsBegin) } @@ -1287,9 +1284,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1390,22 +1388,29 @@ func chainStreamServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) - } + chainedInt = chainStreamInterceptors(interceptors) } s.opts.streamInt = chainedInt } -// getChainStreamHandler recursively generate the chained StreamHandler -func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - - return func(srv interface{}, ss ServerStream) error { - return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. + var state struct { + i int + next StreamHandler + } + state.next = func(srv interface{}, ss ServerStream) error { + if state.i == len(interceptors)-1 { + return interceptors[state.i](srv, ss, info, handler) + } + state.i++ + return interceptors[state.i-1](srv, ss, info, state.next) + } + return state.next(srv, ss) } } @@ -1418,7 +1423,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if sh != nil { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, } sh.HandleRPC(stream.Context(), statsBegin) } @@ -1521,6 +1528,8 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp } } + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) } @@ -1542,7 +1551,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
+ appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { @@ -1588,7 +1599,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() @@ -1699,11 +1710,7 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis @@ -1741,11 +1748,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() @@ -1798,12 +1801,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1815,8 +1832,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. 
func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1830,6 +1853,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 22c4240cf..b01c548bb 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -218,7 +218,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { @@ -381,6 +381,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 63e476ee7..0285dcc6a 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -36,15 +36,22 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC begins. +// Begin contains stats when an RPC attempt begins. // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. Client bool - // BeginTime is the time when the RPC begins. + // BeginTime is the time when the RPC attempt begins. BeginTime time.Time // FailFast indicates if this RPC is failfast. FailFast bool + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool + // IsTransparentRetryAttempt indicates whether this attempt was initiated + // due to transparently retrying a previous attempt. + IsTransparentRetryAttempt bool } // IsClient indicates if the stats information is from client side. diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 54d187186..6d163b6e3 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -29,6 +29,7 @@ package status import ( "context" + "errors" "fmt" spb "google.golang.org/genproto/googleapis/rpc/status" @@ -73,11 +74,16 @@ func FromProto(s *spb.Status) *Status { return status.FromProto(s) } -// FromError returns a Status representing err if it was produced by this -// package or has a method `GRPCStatus() *Status`. -// If err is nil, a Status is returned with codes.OK and no message. -// Otherwise, ok is false and a Status is returned with codes.Unknown and -// the original error message. +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. 
+// +// - If err is nil, a Status is returned with codes.OK and no message. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true @@ -112,18 +118,18 @@ func Code(err error) codes.Code { return codes.Unknown } -// FromContextError converts a context error into a Status. It returns a -// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is -// non-nil and not a context error. +// FromContextError converts a context error or wrapped context error into a +// Status. It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. func FromContextError(err error) *Status { - switch err { - case nil: + if err == nil { return nil - case context.DeadlineExceeded: + } + if errors.Is(err, context.DeadlineExceeded) { return New(codes.DeadlineExceeded, err.Error()) - case context.Canceled: + } + if errors.Is(err, context.Canceled) { return New(codes.Canceled, err.Error()) - default: - return New(codes.Unknown, err.Error()) } + return New(codes.Unknown, err.Error()) } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 1f3e70d2c..236fc17ec 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/transport" @@ -46,10 +47,12 @@ import ( ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. 
Used @@ -164,6 +167,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -274,33 +282,6 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client if c.creds != nil { callHdr.Creds = c.creds } - var trInfo *traceInfo - if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), - firstLine: firstLine{ - client: true, - }, - } - if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = time.Until(deadline) - } - trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) - } - ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) - sh := cc.dopts.copts.StatsHandler - var beginTime time.Time - if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) - beginTime = time.Now() - begin := &stats.Begin{ - Client: true, - BeginTime: beginTime, - FailFast: c.failFast, - } - sh.HandleRPC(ctx, begin) - } cs := &clientStream{ callHdr: callHdr, @@ -314,7 +295,6 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cp: cp, comp: comp, cancel: cancel, - beginTime: beginTime, firstAttempt: true, onCommit: onCommit, } @@ -323,16 +303,28 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } cs.binlog = binarylog.GetMethodLogger(method) - // Only this initial attempt has stats/tracing. - // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. - if err := cs.newAttemptLocked(sh, trInfo); err != nil { + cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */) + if err != nil { cs.finish(err) return nil, err } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } @@ -371,63 +363,104 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { - newAttempt := &csAttempt{ - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, +// newAttemptLocked creates a new csAttempt without a transport or stream. 
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + sh := cs.cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, } - }() + sh.HandleRPC(ctx, begin) + } - if err := cs.ctx.Err(); err != nil { - return toRPCErr(err) + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) } - ctx := cs.ctx if cs.cc.parsedTarget.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. - ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandler: sh, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries - s, err := a.t.NewStream(cs.ctx, cs.callHdr) + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - if _, ok := err.(transport.PerformedIOError); ok { - // Return without converting to an RPC error so retry code can - // inspect. + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. return err } - return toRPCErr(err) + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. 
+ return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s} return nil } @@ -445,8 +478,7 @@ type clientStream struct { cancel context.CancelFunc // cancels all attempts - sentLast bool // sent an end stream - beginTime time.Time + sentLast bool // sent an end stream methodConfig *MethodConfig @@ -454,7 +486,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlog binarylog.MethodLogger // Binary logger, can be nil. // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -486,6 +518,7 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { + ctx context.Context cs *clientStream t transport.ClientTransport s *transport.Stream @@ -504,6 +537,12 @@ type csAttempt struct { trInfo *traceInfo statsHandler stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { @@ -521,85 +560,76 @@ func (cs *clientStream) commitAttempt() { } // shouldRetry returns nil if the RPC should be retried; otherwise it returns -// the error that should be returned by the operation. -func (cs *clientStream) shouldRetry(err error) error { - unprocessed := false - if cs.attempt.s == nil { - pioErr, ok := err.(transport.PerformedIOError) - if ok { - // Unwrap error. - err = toRPCErr(pioErr.Err) - } else { - unprocessed = true - } - if !ok && !cs.callInfo.failFast { - // In the event of a non-IO operation error from NewStream, we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. - return nil - } +// the error that should be returned by the operation. If the RPC should be +// retried, the bool indicates whether it is being retried transparently. +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs + + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. + return false, err } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. - return err + if a.s == nil && a.allowTransparentRetry { + return true, nil } // Wait for the trailers. - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + unprocessed := false + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. - return nil + return true, nil } if cs.cc.dopts.disableRetry { - return err + return false, err } pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { - return err + if a.s != nil { + if !a.s.TrailersOnly() { + return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. 
- sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } hasPushback = true } else if len(sps) > 1 { channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } rp := cs.methodConfig.RetryPolicy if rp == nil || !rp.RetryableStatusCodes[code] { - return err + return false, err } // Note: the ordering here is important; we count this as a failure // only if the code matched a retryable code. if cs.retryThrottler.throttle() { - return err + return false, err } if cs.numRetries+1 >= rp.MaxAttempts { - return err + return false, err } var dur time.Duration @@ -622,26 +652,32 @@ func (cs *clientStream) shouldRetry(err error) error { select { case <-t.C: cs.numRetries++ - return nil + return false, nil case <-cs.ctx.Done(): t.Stop() - return status.FromContextError(cs.ctx.Err()).Err() + return false, status.FromContextError(cs.ctx.Err()).Err() } } // Returns nil if a retry was performed and succeeded; error otherwise. -func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(lastErr) - if err := cs.shouldRetry(lastErr); err != nil { + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) + if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(nil, nil); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -651,7 +687,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -659,7 +698,11 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) for { if cs.committed { cs.mu.Unlock() - return op(cs.attempt) + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. 
+ return toRPCErr(op(cs.attempt)) } a := cs.attempt cs.mu.Unlock() @@ -677,7 +720,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -708,7 +751,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true } - return m, err + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -726,10 +769,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -777,22 +819,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. - m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if cs.binlog != nil && err == nil { cs.binlog.Log(&binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, + Message: data, }) } - return + return err } func (cs *clientStream) RecvMsg(m interface{}) error { @@ -924,7 +961,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return io.EOF } if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -972,7 +1009,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { a.mu.Unlock() } if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1034,12 +1071,12 @@ func (a *csAttempt) finish(err error) { if a.statsHandler != nil { end := &stats.End{ Client: true, - BeginTime: a.cs.beginTime, + BeginTime: a.beginTime, EndTime: time.Now(), Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.cs.ctx, end) + a.statsHandler.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1344,8 +1381,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. 
@@ -1408,7 +1447,7 @@ type serverStream struct { statsHandler stats.Handler - binlog *binarylog.MethodLogger + binlog binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). @@ -1428,11 +1467,20 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) if ss.binlog != nil && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() ss.binlog.Log(&binarylog.ServerHeader{ @@ -1447,6 +1495,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index bfe5cf887..5bc03f9b3 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.38.0" +const Version = "1.47.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 1a0dbd7ee..ceb436c6c 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -32,26 +32,14 @@ PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" go version if [[ "$1" = "-install" ]]; then - # Check for module support - if go help mod >& /dev/null; then - # Install the pinned versions as defined in module tools. - pushd ./test/tools - go install \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - else - # Ye olde `go get` incantation. - # Note: this gets the latest version of all tools (vs. the pinned versions - # with Go modules). - go get -u \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - fi + # Install the pinned versions as defined in module tools. + pushd ./test/tools + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell + popd if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ "${TRAVIS}" = "true" ]]; then PROTOBUF_VERSION=3.14.0 @@ -101,10 +89,6 @@ not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' -# - Check imports that are illegal in appengine (until Go 1.11). -# TODO: Remove when we drop Go 1.10 support -go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go - misspell -error . # - Check that generated proto files are up to date. @@ -123,7 +107,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 
2>&1 | not grep -vE "/testv3\.pb\.go:" + golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy git status --porcelain 2>&1 | fail_on_output || \ diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go index 6338c44dc..d34efa9b1 100644 --- a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go @@ -128,6 +128,9 @@ func genGeneratedHeader(gen *protogen.Plugin, g *protogen.GeneratedFile, f *file protocVersion := "(unknown)" if v := gen.Request.GetCompilerVersion(); v != nil { protocVersion = fmt.Sprintf("v%v.%v.%v", v.GetMajor(), v.GetMinor(), v.GetPatch()) + if s := v.GetSuffix(); s != "" { + protocVersion += "-" + s + } } g.P("// \tprotoc-gen-go ", protocGenGoVersion) g.P("// \tprotoc ", protocVersion) @@ -444,7 +447,15 @@ func genMessageDefaultDecls(g *protogen.GeneratedFile, f *fileInfo, m *messageIn case protoreflect.EnumKind: idx := field.Desc.DefaultEnumValue().Index() val := field.Enum.Values[idx] - consts = append(consts, fmt.Sprintf("%s = %s", name, g.QualifiedGoIdent(val.GoIdent))) + if val.GoIdent.GoImportPath == f.GoImportPath { + consts = append(consts, fmt.Sprintf("%s = %s", name, g.QualifiedGoIdent(val.GoIdent))) + } else { + // If the enum value is declared in a different Go package, + // reference it by number since the name may not be correct. + // See https://github.com/golang/protobuf/issues/513. + consts = append(consts, fmt.Sprintf("%s = %s(%d) // %s", + name, g.QualifiedGoIdent(field.Enum.GoIdent), val.Desc.Number(), g.QualifiedGoIdent(val.GoIdent))) + } case protoreflect.FloatKind, protoreflect.DoubleKind: if f := defVal.Float(); math.IsNaN(f) || math.IsInf(f, 0) { var fn, arg string @@ -527,25 +538,6 @@ func genMessageBaseMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInf g.P() f.needRawDesc = true } - - // ExtensionRangeArray method. - extRanges := m.Desc.ExtensionRanges() - if m.genExtRangeMethod && extRanges.Len() > 0 { - protoExtRange := protoifacePackage.Ident("ExtensionRangeV1") - extRangeVar := "extRange_" + m.GoIdent.GoName - g.P("var ", extRangeVar, " = []", protoExtRange, " {") - for i := 0; i < extRanges.Len(); i++ { - r := extRanges.Get(i) - g.P("{Start:", r[0], ", End:", r[1]-1 /* inclusive */, "},") - } - g.P("}") - g.P() - g.P("// Deprecated: Use ", m.GoIdent, ".ProtoReflect.Descriptor.ExtensionRanges instead.") - g.P("func (*", m.GoIdent, ") ExtensionRangeArray() []", protoExtRange, " {") - g.P("return ", extRangeVar) - g.P("}") - g.P() - } } func genMessageGetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { @@ -566,7 +558,7 @@ func genMessageGetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageI // Getter for message field. 
goType, pointer := fieldGoType(g, f, field) - defaultValue := fieldDefaultValue(g, m, field) + defaultValue := fieldDefaultValue(g, f, m, field) g.Annotate(m.GoIdent.GoName+".Get"+field.GoName, field.Location) leadingComments := appendDeprecationSuffix("", field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) @@ -688,7 +680,7 @@ func fieldProtobufTagValue(field *protogen.Field) string { return tag.Marshal(field.Desc, enumName) } -func fieldDefaultValue(g *protogen.GeneratedFile, m *messageInfo, field *protogen.Field) string { +func fieldDefaultValue(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field) string { if field.Desc.IsList() { return "nil" } @@ -707,7 +699,15 @@ func fieldDefaultValue(g *protogen.GeneratedFile, m *messageInfo, field *protoge case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.BytesKind: return "nil" case protoreflect.EnumKind: - return g.QualifiedGoIdent(field.Enum.Values[0].GoIdent) + val := field.Enum.Values[0] + if val.GoIdent.GoImportPath == f.GoImportPath { + return g.QualifiedGoIdent(val.GoIdent) + } else { + // If the enum value is declared in a different Go package, + // reference it by number since the name may not be correct. + // See https://github.com/golang/protobuf/issues/513. + return g.QualifiedGoIdent(field.Enum.GoIdent) + "(" + strconv.FormatInt(int64(val.Desc.Number()), 10) + ")" + } default: return "0" } diff --git a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go index 17cbe1a39..2ee676fbb 100644 --- a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go +++ b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go @@ -251,12 +251,13 @@ func (opts Options) New(req *pluginpb.CodeGeneratorRequest) (*Plugin, error) { "\t• a \"M\" argument on the command line.\n\n"+ "See %v for more information.\n", fdesc.GetName(), goPackageDocURL) - case !strings.Contains(string(importPaths[filename]), "/"): - // Check that import paths contain at least one slash to avoid a - // common mistake where import path is confused with package name. + case !strings.Contains(string(importPaths[filename]), ".") && + !strings.Contains(string(importPaths[filename]), "/"): + // Check that import paths contain at least a dot or slash to avoid + // a common mistake where import path is confused with package name. return nil, fmt.Errorf( "invalid Go import path %q for %q\n\n"+ - "The import path must contain at least one forward slash ('/') character.\n\n"+ + "The import path must contain at least one period ('.') or forward slash ('/') character.\n\n"+ "See %v for more information.\n", string(importPaths[filename]), fdesc.GetName(), goPackageDocURL) case packageNames[filename] == "": diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go new file mode 100644 index 000000000..07da5db34 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -0,0 +1,665 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protojson + +import ( + "encoding/base64" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable JSON format parser. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // If AllowPartial is set, input for messages that will result in missing + // required fields will not return an error. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } +} + +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// It will clear the message first before setting the fields. +// If it returns an error, the given message may be partially set. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + dec := decoder{json.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + + // Check for EOF. + tok, err := dec.Read() + if err != nil { + return err + } + if tok.Kind() != json.EOF { + return dec.unexpectedTokenError(tok) + } + + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *json.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok json.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) 
+} + +// unmarshalMessage unmarshals a message into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { + if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { + return unmarshal(d, m) + } + + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + return nil + case json.Name: + // Continue below. + } + + name := tok.Name() + // Unmarshaling a non-custom embedded message in Any will contain the + // JSON field "@type" which should be skipped because it is not a field + // of the embedded message, but simply an artifact of the Any format. + if skipTypeURL && name == "@type" { + d.Read() + continue + } + + // Get the FieldDescriptor. + var fd pref.FieldDescriptor + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + // Only extension names are in [name] format. + extName := pref.FullName(name[1 : len(name)-1]) + extType, err := d.opts.Resolver.FindExtensionByName(extName) + if err != nil && err != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) + } + if extType != nil { + fd = extType.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } + } else { + // The name can either be the JSON name or the proto field name. + fd = fieldDescs.ByJSONName(name) + if fd == nil { + fd = fieldDescs.ByTextName(name) + } + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + if fd == nil { + // Field is unknown. + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + + // Do not allow duplicate fields. + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "duplicate field %v", tok.RawString()) + } + seenNums.Set(num) + + // No need to set values for JSON null unless the field type is + // google.protobuf.Value or google.protobuf.NullValue. + if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) { + d.Read() + continue + } + + switch { + case fd.IsList(): + list := m.Mutable(fd).List() + if err := d.unmarshalList(list, fd); err != nil { + return err + } + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(mmap, fd); err != nil { + return err + } + default: + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + // Required or optional fields. 
+ if err := d.unmarshalSingular(m, fd); err != nil { + return err + } + } + } +} + +func isKnownValue(fd pref.FieldDescriptor) bool { + md := fd.Message() + return md != nil && md.FullName() == genid.Value_message_fullname +} + +func isNullValue(fd pref.FieldDescriptor) bool { + ed := fd.Enum() + return ed != nil && ed.FullName() == genid.NullValue_enum_fullname +} + +// unmarshalSingular unmarshals to the non-repeated field specified +// by the given FieldDescriptor. +func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) error { + var val pref.Value + var err error + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), false) + default: + val, err = d.unmarshalScalar(fd) + } + + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by +// the given FieldDescriptor. +func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { + const b32 int = 32 + const b64 int = 64 + + tok, err := d.Read() + if err != nil { + return pref.Value{}, err + } + + kind := fd.Kind() + switch kind { + case pref.BoolKind: + if tok.Kind() == json.Bool { + return pref.ValueOfBool(tok.Bool()), nil + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if v, ok := unmarshalInt(tok, b32); ok { + return v, nil + } + + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if v, ok := unmarshalInt(tok, b64); ok { + return v, nil + } + + case pref.Uint32Kind, pref.Fixed32Kind: + if v, ok := unmarshalUint(tok, b32); ok { + return v, nil + } + + case pref.Uint64Kind, pref.Fixed64Kind: + if v, ok := unmarshalUint(tok, b64); ok { + return v, nil + } + + case pref.FloatKind: + if v, ok := unmarshalFloat(tok, b32); ok { + return v, nil + } + + case pref.DoubleKind: + if v, ok := unmarshalFloat(tok, b64); ok { + return v, nil + } + + case pref.StringKind: + if tok.Kind() == json.String { + return pref.ValueOfString(tok.ParsedString()), nil + } + + case pref.BytesKind: + if v, ok := unmarshalBytes(tok); ok { + return v, nil + } + + case pref.EnumKind: + if v, ok := unmarshalEnum(tok, fd); ok { + return v, nil + } + + default: + panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) + } + + return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) { + switch tok.Kind() { + case json.Number: + return getInt(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return pref.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return pref.Value{}, false + } + return getInt(tok, bitSize) + } + return pref.Value{}, false +} + +func getInt(tok json.Token, bitSize int) (pref.Value, bool) { + n, ok := tok.Int(bitSize) + if !ok { + return pref.Value{}, false + } + if bitSize == 32 { + return pref.ValueOfInt32(int32(n)), true + } + return pref.ValueOfInt64(n), true +} + +func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) { + switch tok.Kind() { + case json.Number: + return getUint(tok, bitSize) + + case json.String: + // Decode number from string. 
+ s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return pref.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return pref.Value{}, false + } + return getUint(tok, bitSize) + } + return pref.Value{}, false +} + +func getUint(tok json.Token, bitSize int) (pref.Value, bool) { + n, ok := tok.Uint(bitSize) + if !ok { + return pref.Value{}, false + } + if bitSize == 32 { + return pref.ValueOfUint32(uint32(n)), true + } + return pref.ValueOfUint64(n), true +} + +func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) { + switch tok.Kind() { + case json.Number: + return getFloat(tok, bitSize) + + case json.String: + s := tok.ParsedString() + switch s { + case "NaN": + if bitSize == 32 { + return pref.ValueOfFloat32(float32(math.NaN())), true + } + return pref.ValueOfFloat64(math.NaN()), true + case "Infinity": + if bitSize == 32 { + return pref.ValueOfFloat32(float32(math.Inf(+1))), true + } + return pref.ValueOfFloat64(math.Inf(+1)), true + case "-Infinity": + if bitSize == 32 { + return pref.ValueOfFloat32(float32(math.Inf(-1))), true + } + return pref.ValueOfFloat64(math.Inf(-1)), true + } + + // Decode number from string. + if len(s) != len(strings.TrimSpace(s)) { + return pref.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return pref.Value{}, false + } + return getFloat(tok, bitSize) + } + return pref.Value{}, false +} + +func getFloat(tok json.Token, bitSize int) (pref.Value, bool) { + n, ok := tok.Float(bitSize) + if !ok { + return pref.Value{}, false + } + if bitSize == 32 { + return pref.ValueOfFloat32(float32(n)), true + } + return pref.ValueOfFloat64(n), true +} + +func unmarshalBytes(tok json.Token) (pref.Value, bool) { + if tok.Kind() != json.String { + return pref.Value{}, false + } + + s := tok.ParsedString() + enc := base64.StdEncoding + if strings.ContainsAny(s, "-_") { + enc = base64.URLEncoding + } + if len(s)%4 != 0 { + enc = enc.WithPadding(base64.NoPadding) + } + b, err := enc.DecodeString(s) + if err != nil { + return pref.Value{}, false + } + return pref.ValueOfBytes(b), true +} + +func unmarshalEnum(tok json.Token, fd pref.FieldDescriptor) (pref.Value, bool) { + switch tok.Kind() { + case json.String: + // Lookup EnumNumber based on name. + s := tok.ParsedString() + if enumVal := fd.Enum().Values().ByName(pref.Name(s)); enumVal != nil { + return pref.ValueOfEnum(enumVal.Number()), true + } + + case json.Number: + if n, ok := tok.Int(32); ok { + return pref.ValueOfEnum(pref.EnumNumber(n)), true + } + + case json.Null: + // This is only valid for google.protobuf.NullValue. 
+ if isNullValue(fd) { + return pref.ValueOfEnum(0), true + } + } + + return pref.Value{}, false +} + +func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ArrayOpen { + return d.unexpectedTokenError(tok) + } + + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val := list.NewElement() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return err + } + list.Append(val) + } + default: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(val) + } + } + + return nil +} + +func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside the for loop + // below. + var unmarshalMapValue func() (pref.Value, error) + switch fd.MapValue().Kind() { + case pref.MessageKind, pref.GroupKind: + unmarshalMapValue = func() (pref.Value, error) { + val := mmap.NewValue() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return pref.Value{}, err + } + return val, nil + } + default: + unmarshalMapValue = func() (pref.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + break Loop + case json.Name: + // Continue. + } + + // Unmarshal field name. + pkey, err := d.unmarshalMapKey(tok, fd.MapKey()) + if err != nil { + return err + } + + // Check for duplicate field name. + if mmap.Has(pkey) { + return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString()) + } + + // Read and unmarshal field value. + pval, err := unmarshalMapValue() + if err != nil { + return err + } + + mmap.Set(pkey, pval) + } + + return nil +} + +// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. +// A map key type is any integral or string type. 
+func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref.MapKey, error) { + const b32 = 32 + const b64 = 64 + const base10 = 10 + + name := tok.Name() + kind := fd.Kind() + switch kind { + case pref.StringKind: + return pref.ValueOfString(name).MapKey(), nil + + case pref.BoolKind: + switch name { + case "true": + return pref.ValueOfBool(true).MapKey(), nil + case "false": + return pref.ValueOfBool(false).MapKey(), nil + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if n, err := strconv.ParseInt(name, base10, b32); err == nil { + return pref.ValueOfInt32(int32(n)).MapKey(), nil + } + + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if n, err := strconv.ParseInt(name, base10, b64); err == nil { + return pref.ValueOfInt64(int64(n)).MapKey(), nil + } + + case pref.Uint32Kind, pref.Fixed32Kind: + if n, err := strconv.ParseUint(name, base10, b32); err == nil { + return pref.ValueOfUint32(uint32(n)).MapKey(), nil + } + + case pref.Uint64Kind, pref.Fixed64Kind: + if n, err := strconv.ParseUint(name, base10, b64); err == nil { + return pref.ValueOfUint64(uint64(n)).MapKey(), nil + } + + default: + panic(fmt.Sprintf("invalid kind for map key: %v", kind)) + } + + return pref.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go new file mode 100644 index 000000000..00ea2fecf --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protojson marshals and unmarshals protocol buffer messages as JSON +// format. It follows the guide at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// This package produces a different output than the standard "encoding/json" +// package, which does not operate correctly on protocol buffer messages. +package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go new file mode 100644 index 000000000..ba971f078 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -0,0 +1,344 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. 
+func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given proto.Message in JSON format using default options. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable JSON format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // UseProtoNames uses proto field name instead of lowerCamelCase name in JSON + // field names. + UseProtoNames bool + + // UseEnumNumbers emits enum values as numbers. + UseEnumNumbers bool + + // EmitUnpopulated specifies whether to emit unpopulated fields. It does not + // emit unpopulated oneof fields or unpopulated extension fields. + // The JSON value emitted for unpopulated fields are as follows: + // ╔═══════╤════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════╣ + // ║ false │ proto3 boolean fields ║ + // ║ 0 │ proto3 numeric fields ║ + // ║ "" │ proto3 string/bytes fields ║ + // ║ null │ proto2 scalar fields ║ + // ║ null │ message fields ║ + // ║ [] │ list fields ║ + // ║ {} │ map fields ║ + // ╚═══════╧════════════════════════════╝ + EmitUnpopulated bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.AllowPartial = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal marshals the given proto.Message in the JSON format using options in +// MarshalOptions. Do not depend on the output being stable. It may change over +// time across different versions of the program. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + return o.marshal(m) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. 
+func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := json.NewEncoder(o.Indent) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case the output in an empty JSON object. + if m == nil { + return []byte("{}"), nil + } + + enc := encoder{internalEnc, o} + if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil { + return nil, err + } + if o.AllowPartial { + return enc.Bytes(), nil + } + return enc.Bytes(), proto.CheckInitialized(m) +} + +type encoder struct { + *json.Encoder + opts MarshalOptions +} + +// typeFieldDesc is a synthetic field descriptor used for the "@type" field. +var typeFieldDesc = func() protoreflect.FieldDescriptor { + var fd filedesc.Field + fd.L0.FullName = "@type" + fd.L0.Index = -1 + fd.L1.Cardinality = protoreflect.Optional + fd.L1.Kind = protoreflect.StringKind + return &fd +}() + +// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method +// to additionally iterate over a synthetic field for the type URL. +type typeURLFieldRanger struct { + order.FieldRanger + typeURL string +} + +func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + if !f(typeFieldDesc, pref.ValueOfString(m.typeURL)) { + return + } + m.FieldRanger.Range(f) +} + +// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range +// method to additionally iterate over unpopulated fields. +type unpopulatedFieldRanger struct{ pref.Message } + +func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if m.Has(fd) || fd.ContainingOneof() != nil { + continue // ignore populated fields and fields within a oneofs + } + + v := m.Get(fd) + isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid() + isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil + if isProto2Scalar || isSingularMessage { + v = pref.Value{} // use invalid value to emit null + } + if !f(fd, v) { + return + } + } + m.Message.Range(f) +} + +// marshalMessage marshals the fields in the given protoreflect.Message. +// If the typeURL is non-empty, then a synthetic "@type" field is injected +// containing the URL as the value. +func (e encoder) marshalMessage(m pref.Message, typeURL string) error { + if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { + return errors.New("no support for proto1 MessageSets") + } + + if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { + return marshal(e, m) + } + + e.StartObject() + defer e.EndObject() + + var fields order.FieldRanger = m + if e.opts.EmitUnpopulated { + fields = unpopulatedFieldRanger{m} + } + if typeURL != "" { + fields = typeURLFieldRanger{fields, typeURL} + } + + var err error + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd pref.FieldDescriptor, v pref.Value) bool { + name := fd.JSONName() + if e.opts.UseProtoNames { + name = fd.TextName() + } + + if err = e.WriteName(name); err != nil { + return false + } + if err = e.marshalValue(v, fd); err != nil { + return false + } + return true + }) + return err +} + +// marshalValue marshals the given protoreflect.Value. 
+func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(val.List(), fd) + case fd.IsMap(): + return e.marshalMap(val.Map(), fd) + default: + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { + if !val.IsValid() { + e.WriteNull() + return nil + } + + switch kind := fd.Kind(); kind { + case pref.BoolKind: + e.WriteBool(val.Bool()) + + case pref.StringKind: + if e.WriteString(val.String()) != nil { + return errors.InvalidUTF8(string(fd.FullName())) + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + e.WriteInt(val.Int()) + + case pref.Uint32Kind, pref.Fixed32Kind: + e.WriteUint(val.Uint()) + + case pref.Int64Kind, pref.Sint64Kind, pref.Uint64Kind, + pref.Sfixed64Kind, pref.Fixed64Kind: + // 64-bit integers are written out as JSON string. + e.WriteString(val.String()) + + case pref.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case pref.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 64) + + case pref.BytesKind: + e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) + + case pref.EnumKind: + if fd.Enum().FullName() == genid.NullValue_enum_fullname { + e.WriteNull() + } else { + desc := fd.Enum().Values().ByNumber(val.Enum()) + if e.opts.UseEnumNumbers || desc == nil { + e.WriteInt(int64(val.Enum())) + } else { + e.WriteString(string(desc.Name())) + } + } + + case pref.MessageKind, pref.GroupKind: + if err := e.marshalMessage(val.Message(), ""); err != nil { + return err + } + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List. +func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error { + e.StartArray() + defer e.EndArray() + + for i := 0; i < list.Len(); i++ { + item := list.Get(i) + if err := e.marshalSingular(item, fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals given protoreflect.Map. +func (e encoder) marshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { + e.StartObject() + defer e.EndObject() + + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(k pref.MapKey, v pref.Value) bool { + if err = e.WriteName(k.String()); err != nil { + return false + } + if err = e.marshalSingular(v, fd.MapValue()); err != nil { + return false + } + return true + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go new file mode 100644 index 000000000..72924a905 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -0,0 +1,889 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protojson + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + "time" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type marshalFunc func(encoder, pref.Message) error + +// wellKnownTypeMarshaler returns a marshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. +func wellKnownTypeMarshaler(name pref.FullName) marshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return encoder.marshalAny + case genid.Timestamp_message_name: + return encoder.marshalTimestamp + case genid.Duration_message_name: + return encoder.marshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return encoder.marshalWrapperType + case genid.Struct_message_name: + return encoder.marshalStruct + case genid.ListValue_message_name: + return encoder.marshalListValue + case genid.Value_message_name: + return encoder.marshalKnownValue + case genid.FieldMask_message_name: + return encoder.marshalFieldMask + case genid.Empty_message_name: + return encoder.marshalEmpty + } + } + return nil +} + +type unmarshalFunc func(decoder, pref.Message) error + +// wellKnownTypeUnmarshaler returns a unmarshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. +func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return decoder.unmarshalAny + case genid.Timestamp_message_name: + return decoder.unmarshalTimestamp + case genid.Duration_message_name: + return decoder.unmarshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return decoder.unmarshalWrapperType + case genid.Struct_message_name: + return decoder.unmarshalStruct + case genid.ListValue_message_name: + return decoder.unmarshalListValue + case genid.Value_message_name: + return decoder.unmarshalKnownValue + case genid.FieldMask_message_name: + return decoder.unmarshalFieldMask + case genid.Empty_message_name: + return decoder.unmarshalEmpty + } + } + return nil +} + +// The JSON representation of an Any message uses the regular representation of +// the deserialized, embedded message, with an additional field `@type` which +// contains the type URL. If the embedded message type is well-known and has a +// custom JSON representation, that representation will be embedded adding a +// field `value` which holds the custom JSON in addition to the `@type` field. 
+ +func (e encoder) marshalAny(m pref.Message) error { + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + if !m.Has(fdType) { + if !m.Has(fdValue) { + // If message is empty, marshal out empty JSON object. + e.StartObject() + e.EndObject() + return nil + } else { + // Return error if type_url field is not set, but value is set. + return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name) + } + } + + typeVal := m.Get(fdType) + valueVal := m.Get(fdValue) + + // Resolve the type in order to unmarshal value field. + typeURL := typeVal.String() + emt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) + } + + em := emt.New() + err = proto.UnmarshalOptions{ + AllowPartial: true, // never check required fields inside an Any + Resolver: e.opts.Resolver, + }.Unmarshal(valueVal.Bytes(), em.Interface()) + if err != nil { + return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err) + } + + // If type of value has custom JSON encoding, marshal out a field "value" + // with corresponding custom JSON encoding of the embedded message as a + // field. + if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { + e.StartObject() + defer e.EndObject() + + // Marshal out @type field. + e.WriteName("@type") + if err := e.WriteString(typeURL); err != nil { + return err + } + + e.WriteName("value") + return marshal(e, em) + } + + // Else, marshal out the embedded message's fields in this Any object. + if err := e.marshalMessage(em, typeURL); err != nil { + return err + } + + return nil +} + +func (d decoder) unmarshalAny(m pref.Message) error { + // Peek to check for json.ObjectOpen to avoid advancing a read. + start, err := d.Peek() + if err != nil { + return err + } + if start.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(start) + } + + // Use another decoder to parse the unread bytes for @type field. This + // avoids advancing a read from current decoder because the current JSON + // object may contain the fields of the embedded type. + dec := decoder{d.Clone(), UnmarshalOptions{}} + tok, err := findTypeURL(dec) + switch err { + case errEmptyObject: + // An empty JSON object translates to an empty Any message. + d.Read() // Read json.ObjectOpen. + d.Read() // Read json.ObjectClose. + return nil + + case errMissingType: + if d.opts.DiscardUnknown { + // Treat all fields as unknowns, similar to an empty object. + return d.skipJSONValue() + } + // Use start.Pos() for line position. + return d.newError(start.Pos(), err.Error()) + + default: + if err != nil { + return err + } + } + + typeURL := tok.ParsedString() + emt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err) + } + + // Create new message for the embedded message type and unmarshal into it. + em := emt.New() + if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil { + // If embedded message is a custom type, + // unmarshal the JSON "value" field into it. + if err := d.unmarshalAnyValue(unmarshal, em); err != nil { + return err + } + } else { + // Else unmarshal the current JSON object into it. 
+ if err := d.unmarshalMessage(em, true); err != nil { + return err + } + } + // Serialize the embedded message and assign the resulting bytes to the + // proto value field. + b, err := proto.MarshalOptions{ + AllowPartial: true, // No need to check required fields inside an Any. + Deterministic: true, + }.Marshal(em.Interface()) + if err != nil { + return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err) + } + + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + m.Set(fdType, pref.ValueOfString(typeURL)) + m.Set(fdValue, pref.ValueOfBytes(b)) + return nil +} + +var errEmptyObject = fmt.Errorf(`empty object`) +var errMissingType = fmt.Errorf(`missing "@type" field`) + +// findTypeURL returns the token for the "@type" field value from the given +// JSON bytes. It is expected that the given bytes start with json.ObjectOpen. +// It returns errEmptyObject if the JSON object is empty or errMissingType if +// @type field does not exist. It returns other error if the @type field is not +// valid or other decoding issues. +func findTypeURL(d decoder) (json.Token, error) { + var typeURL string + var typeTok json.Token + numFields := 0 + // Skip start object. + d.Read() + +Loop: + for { + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + + switch tok.Kind() { + case json.ObjectClose: + if typeURL == "" { + // Did not find @type field. + if numFields > 0 { + return json.Token{}, errMissingType + } + return json.Token{}, errEmptyObject + } + break Loop + + case json.Name: + numFields++ + if tok.Name() != "@type" { + // Skip value. + if err := d.skipJSONValue(); err != nil { + return json.Token{}, err + } + continue + } + + // Return error if this was previously set already. + if typeURL != "" { + return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`) + } + // Read field value. + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + if tok.Kind() != json.String { + return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString()) + } + typeURL = tok.ParsedString() + if typeURL == "" { + return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`) + } + typeTok = tok + } + } + + return typeTok, nil +} + +// skipJSONValue parses a JSON value (null, boolean, string, number, object and +// array) in order to advance the read to the next JSON value. It relies on +// the decoder returning an error if the types are not in valid sequence. +func (d decoder) skipJSONValue() error { + tok, err := d.Read() + if err != nil { + return err + } + // Only need to continue reading for objects and arrays. + switch tok.Kind() { + case json.ObjectOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + case json.Name: + // Skip object field value. + if err := d.skipJSONValue(); err != nil { + return err + } + } + } + + case json.ArrayOpen: + for { + tok, err := d.Peek() + if err != nil { + return err + } + switch tok.Kind() { + case json.ArrayClose: + d.Read() + return nil + default: + // Skip array item. + if err := d.skipJSONValue(); err != nil { + return err + } + } + } + } + return nil +} + +// unmarshalAnyValue unmarshals the given custom-type message from the JSON +// object's "value" field. 
+func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) error { + // Skip ObjectOpen, and start reading the fields. + d.Read() + + var found bool // Used for detecting duplicate "value". + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + if !found { + return d.newError(tok.Pos(), `missing "value" field`) + } + return nil + + case json.Name: + switch tok.Name() { + case "@type": + // Skip the value as this was previously parsed already. + d.Read() + + case "value": + if found { + return d.newError(tok.Pos(), `duplicate "value" field`) + } + // Unmarshal the field value into the given message. + if err := unmarshal(d, m); err != nil { + return err + } + found = true + + default: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + } + } +} + +// Wrapper types are encoded as JSON primitives like string, number or boolean. + +func (e encoder) marshalWrapperType(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val := m.Get(fd) + return e.marshalSingular(val, fd) +} + +func (d decoder) unmarshalWrapperType(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// The JSON representation for Empty is an empty JSON object. + +func (e encoder) marshalEmpty(pref.Message) error { + e.StartObject() + e.EndObject() + return nil +} + +func (d decoder) unmarshalEmpty(pref.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + + case json.Name: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + + default: + return d.unexpectedTokenError(tok) + } + } +} + +// The JSON representation for Struct is a JSON object that contains the encoded +// Struct.fields map and follows the serialization rules for a map. + +func (e encoder) marshalStruct(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return e.marshalMap(m.Get(fd).Map(), fd) +} + +func (d decoder) unmarshalStruct(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return d.unmarshalMap(m.Mutable(fd).Map(), fd) +} + +// The JSON representation for ListValue is JSON array that contains the encoded +// ListValue.values repeated field and follows the serialization rules for a +// repeated field. + +func (e encoder) marshalListValue(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return e.marshalList(m.Get(fd).List(), fd) +} + +func (d decoder) unmarshalListValue(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return d.unmarshalList(m.Mutable(fd).List(), fd) +} + +// The JSON representation for a Value is dependent on the oneof field that is +// set. Each of the field in the oneof has its own custom serialization rule. A +// Value message needs to be a oneof field set, else it is an error. 
+ +func (e encoder) marshalKnownValue(m pref.Message) error { + od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) + } + if fd.Number() == genid.Value_NumberValue_field_number { + if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) { + return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v) + } + } + return e.marshalSingular(m.Get(fd), fd) +} + +func (d decoder) unmarshalKnownValue(m pref.Message) error { + tok, err := d.Peek() + if err != nil { + return err + } + + var fd pref.FieldDescriptor + var val pref.Value + switch tok.Kind() { + case json.Null: + d.Read() + fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) + val = pref.ValueOfEnum(0) + + case json.Bool: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) + val = pref.ValueOfBool(tok.Bool()) + + case json.Number: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number) + var ok bool + val, ok = unmarshalFloat(tok, 64) + if !ok { + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + case json.String: + // A JSON string may have been encoded from the number_value field, + // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows + // for it to be in JSON string form. Given this custom encoding spec, + // however, there is no way to identify that and hence a JSON string is + // always assigned to the string_value field, which means that certain + // encoding cannot be parsed back to the same field. + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) + val = pref.ValueOfString(tok.ParsedString()) + + case json.ObjectOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalStruct(val.Message()); err != nil { + return err + } + + case json.ArrayOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalListValue(val.Message()); err != nil { + return err + } + + default: + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + m.Set(fd, val) + return nil +} + +// The JSON representation for a Duration is a JSON string that ends in the +// suffix "s" (indicating seconds) and is preceded by the number of seconds, +// with nanoseconds expressed as fractional seconds. +// +// Durations less than one second are represented with a 0 seconds field and a +// positive or negative nanos field. For durations of one second or more, a +// non-zero value for the nanos field must be of the same sign as the seconds +// field. +// +// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive. +// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive. 
+ +const ( + secondsInNanos = 999999999 + maxSecondsInDuration = 315576000000 +) + +func (e encoder) marshalDuration(m pref.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs) + } + if nanos < -secondsInNanos || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos) + } + if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { + return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname) + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + var sign string + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos + } + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "s") + return nil +} + +func (d decoder) unmarshalDuration(m pref.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + secs, nanos, ok := parseDuration(tok.ParsedString()) + if !ok { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because parseDuration would + // have covered that already. + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + m.Set(fdSeconds, pref.ValueOfInt64(secs)) + m.Set(fdNanos, pref.ValueOfInt32(nanos)) + return nil +} + +// parseDuration parses the given input string for seconds and nanoseconds value +// for the Duration JSON format. The format is a decimal number with a suffix +// 's'. It can have optional plus/minus sign. There needs to be at least an +// integer or fractional part. Fractional part is limited to 9 digits only for +// nanoseconds precision, regardless of whether there are trailing zero digits. +// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s. +func parseDuration(input string) (int64, int32, bool) { + b := []byte(input) + size := len(b) + if size < 2 { + return 0, 0, false + } + if b[size-1] != 's' { + return 0, 0, false + } + b = b[:size-1] + + // Read optional plus/minus symbol. + var neg bool + switch b[0] { + case '-': + neg = true + b = b[1:] + case '+': + b = b[1:] + } + if len(b) == 0 { + return 0, 0, false + } + + // Read the integer part. + var intp []byte + switch { + case b[0] == '0': + b = b[1:] + + case '1' <= b[0] && b[0] <= '9': + intp = b[0:] + b = b[1:] + n := 1 + for len(b) > 0 && '0' <= b[0] && b[0] <= '9' { + n++ + b = b[1:] + } + intp = intp[:n] + + case b[0] == '.': + // Continue below. + + default: + return 0, 0, false + } + + hasFrac := false + var frac [9]byte + if len(b) > 0 { + if b[0] != '.' { + return 0, 0, false + } + // Read the fractional part. 
+ b = b[1:] + n := 0 + for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' { + frac[n] = b[0] + n++ + b = b[1:] + } + // It is not valid if there are more bytes left. + if len(b) > 0 { + return 0, 0, false + } + // Pad fractional part with 0s. + for i := n; i < 9; i++ { + frac[i] = '0' + } + hasFrac = true + } + + var secs int64 + if len(intp) > 0 { + var err error + secs, err = strconv.ParseInt(string(intp), 10, 64) + if err != nil { + return 0, 0, false + } + } + + var nanos int64 + if hasFrac { + nanob := bytes.TrimLeft(frac[:], "0") + if len(nanob) > 0 { + var err error + nanos, err = strconv.ParseInt(string(nanob), 10, 32) + if err != nil { + return 0, 0, false + } + } + } + + if neg { + if secs > 0 { + secs = -secs + } + if nanos > 0 { + nanos = -nanos + } + } + return secs, int32(nanos), true +} + +// The JSON representation for a Timestamp is a JSON string in the RFC 3339 +// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where +// {year} is always expressed using four digits while {month}, {day}, {hour}, +// {min}, and {sec} are zero-padded to two digits each. The fractional seconds, +// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The +// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding +// should always use UTC (as indicated by "Z") and a decoder should be able to +// accept both UTC and other timezones (as indicated by an offset). +// +// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z +// inclusive. +// Timestamp.nanos must be from 0 to 999,999,999 inclusive. + +const ( + maxTimestampSeconds = 253402300799 + minTimestampSeconds = -62135596800 +) + +func (e encoder) marshalTimestamp(m pref.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs) + } + if nanos < 0 || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos) + } + // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3, + // 6 or 9 fractional digits. + t := time.Unix(secs, nanos).UTC() + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "Z") + return nil +} + +func (d decoder) unmarshalTimestamp(m pref.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + t, err := time.Parse(time.RFC3339Nano, tok.ParsedString()) + if err != nil { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because time.Parse would have + // covered that already. 
+ secs := t.Unix() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + m.Set(fdSeconds, pref.ValueOfInt64(secs)) + m.Set(fdNanos, pref.ValueOfInt32(int32(t.Nanosecond()))) + return nil +} + +// The JSON representation for a FieldMask is a JSON string where paths are +// separated by a comma. Fields name in each path are converted to/from +// lower-camel naming conventions. Encoding should fail if the path name would +// end up differently after a round-trip. + +func (e encoder) marshalFieldMask(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Get(fd).List() + paths := make([]string, 0, list.Len()) + + for i := 0; i < list.Len(); i++ { + s := list.Get(i).String() + if !pref.FullName(s).IsValid() { + return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) + } + // Return error if conversion to camelCase is not reversible. + cc := strs.JSONCamelCase(s) + if s != strs.JSONSnakeCase(cc) { + return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s) + } + paths = append(paths, cc) + } + + e.WriteString(strings.Join(paths, ",")) + return nil +} + +func (d decoder) unmarshalFieldMask(m pref.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + str := strings.TrimSpace(tok.ParsedString()) + if str == "" { + return nil + } + paths := strings.Split(str, ",") + + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Mutable(fd).List() + + for _, s0 := range paths { + s := strs.JSONSnakeCase(s0) + if strings.Contains(s0, "_") || !pref.FullName(s).IsValid() { + return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) + } + list.Append(pref.ValueOfString(s)) + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 8fb1d9e08..179d6e8fc 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -744,9 +744,6 @@ func (d decoder) skipValue() error { // Skip items. This will not validate whether skipped values are // of the same type or not, same behavior as C++ // TextFormat::Parser::AllowUnknownField(true) version 3.8.0. - if err := d.skipValue(); err != nil { - return err - } } } } diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index a427f8b70..9c61112f5 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -21,10 +21,11 @@ import ( type Number int32 const ( - MinValidNumber Number = 1 - FirstReservedNumber Number = 19000 - LastReservedNumber Number = 19999 - MaxValidNumber Number = 1<<29 - 1 + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 + DefaultRecursionLimit = 10000 ) // IsValid reports whether the field number is semantically valid. 
@@ -55,6 +56,7 @@ const ( errCodeOverflow errCodeReserved errCodeEndGroup + errCodeRecursionDepth ) var ( @@ -112,6 +114,10 @@ func ConsumeField(b []byte) (Number, Type, int) { // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + return consumeFieldValueD(num, typ, b, DefaultRecursionLimit) +} + +func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) { switch typ { case VarintType: _, n = ConsumeVarint(b) @@ -126,6 +132,9 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { _, n = ConsumeBytes(b) return n case StartGroupType: + if depth < 0 { + return errCodeRecursionDepth + } n0 := len(b) for { num2, typ2, n := ConsumeTag(b) @@ -140,7 +149,7 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { return n0 - len(b) } - n = ConsumeFieldValue(num2, typ2, b) + n = consumeFieldValueD(num2, typ2, b, depth-1) if n < 0 { return n // forward error code } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go new file mode 100644 index 000000000..b13fd29e8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -0,0 +1,340 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "io" + "regexp" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +const unexpectedFmt = "unexpected token %s" + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// Decoder is a token-based JSON decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing ObjectOpen and ArrayOpen values. The + // top of stack represents the object or the array the current value is + // directly located in. + openStack []Kind + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// Peek looks ahead and returns the next token kind without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next JSON token. +// It will return an error if there is no valid token. 
+func (d *Decoder) Read() (Token, error) { + const scalar = Null | Bool | Number | String + + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext() + if err != nil { + return Token{}, err + } + + switch tok.kind { + case EOF: + if len(d.openStack) != 0 || + d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 { + return Token{}, ErrUnexpectedEOF + } + + case Null: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case Bool, Number: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case String: + if d.isValueNext() { + break + } + // This string token should only be for a field name. + if d.lastToken.kind&(ObjectOpen|comma) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + if len(d.in) == 0 { + return Token{}, ErrUnexpectedEOF + } + if c := d.in[0]; c != ':' { + return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c)) + } + tok.kind = Name + d.consume(1) + + case ObjectOpen, ArrayOpen: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = append(d.openStack, tok.kind) + + case ObjectClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ObjectOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case ArrayClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ArrayOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case comma: + if len(d.openStack) == 0 || + d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + } + + // Update d.lastToken only after validating token to be in the right sequence. + d.lastToken = tok + + if d.lastToken.kind == comma { + return d.Read() + } + return tok, nil +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`) + +// parseNext parses for the next JSON token. It returns a Token object for +// different types, except for Name. It does not handle whether the next token +// is in a valid sequence or not. +func (d *Decoder) parseNext() (Token, error) { + // Trim leading spaces. 
+ d.consume(0) + + in := d.in + if len(in) == 0 { + return d.consumeToken(EOF, 0), nil + } + + switch in[0] { + case 'n': + if n := matchWithDelim("null", in); n != 0 { + return d.consumeToken(Null, n), nil + } + + case 't': + if n := matchWithDelim("true", in); n != 0 { + return d.consumeBoolToken(true, n), nil + } + + case 'f': + if n := matchWithDelim("false", in); n != 0 { + return d.consumeBoolToken(false, n), nil + } + + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if n, ok := parseNumber(in); ok { + return d.consumeToken(Number, n), nil + } + + case '"': + s, n, err := d.parseString(in) + if err != nil { + return Token{}, err + } + return d.consumeStringToken(s, n), nil + + case '{': + return d.consumeToken(ObjectOpen, 1), nil + + case '}': + return d.consumeToken(ObjectClose, 1), nil + + case '[': + return d.consumeToken(ArrayOpen, 1), nil + + case ']': + return d.consumeToken(ArrayClose, 1), nil + + case ',': + return d.consumeToken(comma, 1), nil + } + return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in)) +} + +// newSyntaxError returns an error with line and column information useful for +// syntax errors. +func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { + e := errors.New(f, x...) + line, column := d.Position(pos) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +// currPos returns the current index position of d.in from d.orig. +func (d *Decoder) currPos() int { + return len(d.orig) - len(d.in) +} + +// matchWithDelim matches s with the input b and verifies that the match +// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]"). +// As a special case, EOF is considered a delimiter. It returns the length of s +// if there is a match, else 0. +func matchWithDelim(s string, b []byte) int { + if !bytes.HasPrefix(b, []byte(s)) { + return 0 + } + + n := len(s) + if n < len(b) && isNotDelim(b[n]) { + return 0 + } + return n +} + +// isNotDelim returns true if given byte is a not delimiter character. +func isNotDelim(c byte) bool { + return (c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} + +// consume consumes n bytes of input and any subsequent whitespace. +func (d *Decoder) consume(n int) { + d.in = d.in[n:] + for len(d.in) > 0 { + switch d.in[0] { + case ' ', '\n', '\r', '\t': + d.in = d.in[1:] + default: + return + } + } +} + +// isValueNext returns true if next type should be a JSON value: Null, +// Number, String or Bool. 
+func (d *Decoder) isValueNext() bool { + if len(d.openStack) == 0 { + return d.lastToken.kind == 0 + } + + start := d.openStack[len(d.openStack)-1] + switch start { + case ObjectOpen: + return d.lastToken.kind&Name != 0 + case ArrayOpen: + return d.lastToken.kind&(ArrayOpen|comma) != 0 + } + panic(fmt.Sprintf( + "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v", + d.lastToken.kind, start)) +} + +// consumeToken constructs a Token for given Kind with raw value derived from +// current d.in and given size, and consumes the given size-lenght of it. +func (d *Decoder) consumeToken(kind Kind, size int) Token { + tok := Token{ + kind: kind, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + } + d.consume(size) + return tok +} + +// consumeBoolToken constructs a Token for a Bool kind with raw value derived from +// current d.in and given size. +func (d *Decoder) consumeBoolToken(b bool, size int) Token { + tok := Token{ + kind: Bool, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + boo: b, + } + d.consume(size) + return tok +} + +// consumeStringToken constructs a Token for a String kind with raw value derived +// from current d.in and given size. +func (d *Decoder) consumeStringToken(s string, size int) Token { + tok := Token{ + kind: String, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + str: s, + } + d.consume(size) + return tok +} + +// Clone returns a copy of the Decoder for use in reading ahead the next JSON +// object, array or other values without affecting current Decoder. +func (d *Decoder) Clone() *Decoder { + ret := *d + ret.openStack = append([]Kind(nil), ret.openStack...) + return &ret +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go new file mode 100644 index 000000000..2999d7133 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go @@ -0,0 +1,254 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "strconv" +) + +// parseNumber reads the given []byte for a valid JSON number. If it is valid, +// it returns the number of bytes. Parsing logic follows the definition in +// https://tools.ietf.org/html/rfc7159#section-6, and is based off +// encoding/json.isValidNumber function. +func parseNumber(input []byte) (int, bool) { + var n int + + s := input + if len(s) == 0 { + return 0, false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + + // Digits + switch { + case s[0] == '0': + s = s[1:] + n++ + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + n++ + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + + default: + return 0, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + n += 2 + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + n++ + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // Check that next byte is a delimiter or it is at the end. 
+ if n < len(input) && isNotDelim(input[n]) { + return 0, false + } + + return n, true +} + +// numberParts is the result of parsing out a valid JSON number. It contains +// the parts of a number. The parts are used for integer conversion. +type numberParts struct { + neg bool + intp []byte + frac []byte + exp []byte +} + +// parseNumber constructs numberParts from given []byte. The logic here is +// similar to consumeNumber above with the difference of having to construct +// numberParts. The slice fields in numberParts are subslices of the input. +func parseNumberParts(input []byte) (numberParts, bool) { + var neg bool + var intp []byte + var frac []byte + var exp []byte + + s := input + if len(s) == 0 { + return numberParts{}, false + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + if len(s) == 0 { + return numberParts{}, false + } + } + + // Digits + switch { + case s[0] == '0': + // Skip first 0 and no need to store. + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + intp = s + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + intp = intp[:n] + + default: + return numberParts{}, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + frac = s[1:] + n := 1 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + frac = frac[:n] + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + exp = s + n := 0 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return numberParts{}, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + exp = exp[:n] + } + + return numberParts{ + neg: neg, + intp: intp, + frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right. + exp: exp, + }, true +} + +// normalizeToIntString returns an integer string in normal form without the +// E-notation for given numberParts. It will return false if it is not an +// integer or if the exponent exceeds than max/min int value. +func normalizeToIntString(n numberParts) (string, bool) { + intpSize := len(n.intp) + fracSize := len(n.frac) + + if intpSize == 0 && fracSize == 0 { + return "0", true + } + + var exp int + if len(n.exp) > 0 { + i, err := strconv.ParseInt(string(n.exp), 10, 32) + if err != nil { + return "", false + } + exp = int(i) + } + + var num []byte + if exp >= 0 { + // For positive E, shift fraction digits into integer part and also pad + // with zeroes as needed. + + // If there are more digits in fraction than the E value, then the + // number is not an integer. + if fracSize > exp { + return "", false + } + + // Make sure resulting digits are within max value limit to avoid + // unnecessarily constructing a large byte slice that may simply fail + // later on. + const maxDigits = 20 // Max uint64 value has 20 decimal digits. + if intpSize+exp > maxDigits { + return "", false + } + + // Set cap to make a copy of integer part when appended. + num = n.intp[:len(n.intp):len(n.intp)] + num = append(num, n.frac...) + for i := 0; i < exp-fracSize; i++ { + num = append(num, '0') + } + } else { + // For negative E, shift digits in integer part out. + + // If there are fractions, then the number is not an integer. + if fracSize > 0 { + return "", false + } + + // index is where the decimal point will be after adjusting for negative + // exponent. 
+ index := intpSize + exp + if index < 0 { + return "", false + } + + num = n.intp + // If any of the digits being shifted to the right of the decimal point + // is non-zero, then the number is not an integer. + for i := index; i < intpSize; i++ { + if num[i] != '0' { + return "", false + } + } + num = num[:index] + } + + if n.neg { + return "-" + string(num), true + } + return string(num), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go new file mode 100644 index 000000000..f7fea7d8d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go @@ -0,0 +1,91 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +func (d *Decoder) parseString(in []byte) (string, int, error) { + in0 := in + if len(in) == 0 { + return "", 0, ErrUnexpectedEOF + } + if in[0] != '"' { + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0]) + } + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string") + case r < ' ': + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r) + case r == '"': + in = in[1:] + n := len(in0) - len(in) + return string(out), n, nil + case r == '\\': + if len(in) < 2 { + return "", 0, ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\\', '/': + in, out = in[2:], append(out, r) + case 'b': + in, out = in[2:], append(out, '\b') + case 'f': + in, out = in[2:], append(out, '\f') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'u': + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + if err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || + r == unicode.ReplacementChar || err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", 0, ErrUnexpectedEOF +} + +// indexNeedEscapeInBytes returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. 
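
To make the integer normalization in decode_number.go above concrete, here is a test-style sketch that could only live inside this package (the functions are unexported); the expected results are worked out from the code shown above:

    parts, _ := parseNumberParts([]byte("1.5e3"))  // intp="1", frac="5", exp="3"
    s, ok := normalizeToIntString(parts)           // "1500", true: fraction shifted left, zero-padded

    parts, _ = parseNumberParts([]byte("-200e-2")) // neg=true, intp="200", exp="-2"
    s, ok = normalizeToIntString(parts)            // "-2", true: the two trailing zeros are shifted out

    parts, _ = parseNumberParts([]byte("1.23"))
    s, ok = normalizeToIntString(parts)            // "", false: a fractional part remains, so not an integer
    _, _ = s, ok
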
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go new file mode 100644 index 000000000..50578d659 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go @@ -0,0 +1,192 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "strconv" +) + +// Kind represents a token kind expressible in the JSON format. +type Kind uint16 + +const ( + Invalid Kind = (1 << iota) / 2 + EOF + Null + Bool + Number + String + Name + ObjectOpen + ObjectClose + ArrayOpen + ArrayClose + + // comma is only for parsing in between tokens and + // does not need to be exported. + comma +) + +func (k Kind) String() string { + switch k { + case EOF: + return "eof" + case Null: + return "null" + case Bool: + return "bool" + case Number: + return "number" + case String: + return "string" + case ObjectOpen: + return "{" + case ObjectClose: + return "}" + case Name: + return "name" + case ArrayOpen: + return "[" + case ArrayClose: + return "]" + case comma: + return "," + } + return "" +} + +// Token provides a parsed token kind and value. +// +// Values are provided by the difference accessor methods. The accessor methods +// Name, Bool, and ParsedString will panic if called on the wrong kind. There +// are different accessor methods for the Number kind for converting to the +// appropriate Go numeric type and those methods have the ok return value. +type Token struct { + // Token kind. + kind Kind + // pos provides the position of the token in the original input. + pos int + // raw bytes of the serialized token. + // This is a subslice into the original input. + raw []byte + // boo is parsed boolean value. + boo bool + // str is parsed string value. + str string +} + +// Kind returns the token kind. +func (t Token) Kind() Kind { + return t.kind +} + +// RawString returns the read value in string. +func (t Token) RawString() string { + return string(t.raw) +} + +// Pos returns the token position from the input. +func (t Token) Pos() int { + return t.pos +} + +// Name returns the object name if token is Name, else it panics. +func (t Token) Name() string { + if t.kind == Name { + return t.str + } + panic(fmt.Sprintf("Token is not a Name: %v", t.RawString())) +} + +// Bool returns the bool value if token kind is Bool, else it panics. +func (t Token) Bool() bool { + if t.kind == Bool { + return t.boo + } + panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString())) +} + +// ParsedString returns the string value for a JSON string token or the read +// value in string if token is not a string. +func (t Token) ParsedString() string { + if t.kind == String { + return t.str + } + panic(fmt.Sprintf("Token is not a String: %v", t.RawString())) +} + +// Float returns the floating-point number if token kind is Number. +// +// The floating-point precision is specified by the bitSize parameter: 32 for +// float32 or 64 for float64. If bitSize=32, the result still has type float64, +// but it will be convertible to float32 without changing its value. It will +// return false if the number exceeds the floating point limits for given +// bitSize. 
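
One design detail worth noting in decode_token.go: because (1 << iota) / 2 leaves Invalid at zero and gives every other Kind its own bit, the decoder's sequencing rules reduce to mask tests rather than switch statements, as in Decoder.Read above. A small illustration (tok is assumed to be a Token previously returned by Read):

    // Does the token end a JSON value? Mirrors the scalar|ObjectClose|ArrayClose
    // checks in Decoder.Read.
    ends := tok.Kind()&(json.Null|json.Bool|json.Number|json.String|json.ObjectClose|json.ArrayClose) != 0
    _ = ends
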
+func (t Token) Float(bitSize int) (float64, bool) { + if t.kind != Number { + return 0, false + } + f, err := strconv.ParseFloat(t.RawString(), bitSize) + if err != nil { + return 0, false + } + return f, true +} + +// Int returns the signed integer number if token is Number. +// +// The given bitSize specifies the integer type that the result must fit into. +// It returns false if the number is not an integer value or if the result +// exceeds the limits for given bitSize. +func (t Token) Int(bitSize int) (int64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseInt(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +// Uint returns the signed integer number if token is Number. +// +// The given bitSize specifies the unsigned integer type that the result must +// fit into. It returns false if the number is not an unsigned integer value +// or if the result exceeds the limits for given bitSize. +func (t Token) Uint(bitSize int) (uint64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseUint(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +func (t Token) getIntStr() (string, bool) { + if t.kind != Number { + return "", false + } + parts, ok := parseNumberParts(t.raw) + if !ok { + return "", false + } + return normalizeToIntString(parts) +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.boo == y.boo && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go new file mode 100644 index 000000000..fbdf34873 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -0,0 +1,276 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// kind represents an encoding type. +type kind uint8 + +const ( + _ kind = (1 << iota) / 2 + name + scalar + objectOpen + objectClose + arrayOpen + arrayClose +) + +// Encoder provides methods to write out JSON constructs and values. The user is +// responsible for producing valid sequences of JSON constructs and values. +type Encoder struct { + indent string + lastKind kind + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry for an Array or Object +// to be preceded by the indent and trailed by a newline. +func NewEncoder(indent string) (*Encoder, error) { + e := &Encoder{} + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space or tab characters") + } + e.indent = indent + } + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// WriteNull writes out the null value. +func (e *Encoder) WriteNull() { + e.prepareNext(scalar) + e.out = append(e.out, "null"...) +} + +// WriteBool writes out the given boolean value. +func (e *Encoder) WriteBool(b bool) { + e.prepareNext(scalar) + if b { + e.out = append(e.out, "true"...) 
+ } else { + e.out = append(e.out, "false"...) + } +} + +// WriteString writes out the given string in JSON string value. Returns error +// if input string contains invalid UTF-8. +func (e *Encoder) WriteString(s string) error { + e.prepareNext(scalar) + var err error + if e.out, err = appendString(e.out, s); err != nil { + return err + } + return nil +} + +// Sentinel error used for indicating invalid UTF-8. +var errInvalidUTF8 = errors.New("invalid UTF-8") + +func appendString(out []byte, in string) ([]byte, error) { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) + for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + return out, errInvalidUTF8 + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\b': + out = append(out, 'b') + case '\f': + out = append(out, 'f') + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out, nil +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i, r := range s { + if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float and bitSize in JSON number value. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +// appendFloat formats given float in bitSize, and appends to the given []byte. +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, `"NaN"`...) + case math.IsInf(n, +1): + return append(out, `"Infinity"`...) + case math.IsInf(n, -1): + return append(out, `"-Infinity"`...) + } + + // JSON number formatting logic based on encoding/json. + // See floatEncoder.encode for reference. + fmt := byte('f') + if abs := math.Abs(n); abs != 0 { + if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || + bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + out = strconv.AppendFloat(out, n, fmt, -1, bitSize) + if fmt == 'e' { + n := len(out) + if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' { + out[n-2] = out[n-1] + out = out[:n-1] + } + } + return out +} + +// WriteInt writes out the given signed integer in JSON number value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatInt(n, 10)...) +} + +// WriteUint writes out the given unsigned integer in JSON number value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatUint(n, 10)...) +} + +// StartObject writes out the '{' symbol. +func (e *Encoder) StartObject() { + e.prepareNext(objectOpen) + e.out = append(e.out, '{') +} + +// EndObject writes out the '}' symbol. +func (e *Encoder) EndObject() { + e.prepareNext(objectClose) + e.out = append(e.out, '}') +} + +// WriteName writes out the given string in JSON string value and the name +// separator ':'. 
Returns error if input string contains invalid UTF-8, which +// should not be likely as protobuf field names should be valid. +func (e *Encoder) WriteName(s string) error { + e.prepareNext(name) + var err error + // Append to output regardless of error. + e.out, err = appendString(e.out, s) + e.out = append(e.out, ':') + return err +} + +// StartArray writes out the '[' symbol. +func (e *Encoder) StartArray() { + e.prepareNext(arrayOpen) + e.out = append(e.out, '[') +} + +// EndArray writes out the ']' symbol. +func (e *Encoder) EndArray() { + e.prepareNext(arrayClose) + e.out = append(e.out, ']') +} + +// prepareNext adds possible comma and indentation for the next value based +// on last type and indent option. It also updates lastKind to next. +func (e *Encoder) prepareNext(next kind) { + defer func() { + // Set lastKind to next. + e.lastKind = next + }() + + if len(e.indent) == 0 { + // Need to add comma on the following condition. + if e.lastKind&(scalar|objectClose|arrayClose) != 0 && + next&(name|scalar|objectOpen|arrayOpen) != 0 { + e.out = append(e.out, ',') + // For single-line output, add a random extra space after each + // comma to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + switch { + case e.lastKind&(objectOpen|arrayOpen) != 0: + // If next type is NOT closing, add indent and newline. + if next&(objectClose|arrayClose) == 0 { + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } + + case e.lastKind&(scalar|objectClose|arrayClose) != 0: + switch { + // If next type is either a value or name, add comma and newline. + case next&(name|scalar|objectOpen|arrayOpen) != 0: + e.out = append(e.out, ',', '\n') + + // If next type is a closing object or array, adjust indentation. + case next&(objectClose|arrayClose) != 0: + e.indents = e.indents[:len(e.indents)-len(e.indent)] + e.out = append(e.out, '\n') + } + e.out = append(e.out, e.indents...) + + case e.lastKind&name != 0: + e.out = append(e.out, ' ') + // For multi-line output, add a random extra space after key: to make + // output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index eb10ea102..37803773f 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -381,7 +381,7 @@ func (d *Decoder) currentOpenKind() (Kind, byte) { case '[': return ListOpen, ']' } - panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh))) + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh)) } func (d *Decoder) pushOpenStack(ch byte) { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go index aa66bdd06..da289ccce 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -263,3 +263,8 @@ func (e *Encoder) Snapshot() encoderState { func (e *Encoder) Reset(es encoderState) { e.encoderState = es } + +// AppendString appends the escaped form of the input string to b. 
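
A short sketch of how the Encoder added in encoding/json/encode.go above is driven; as with the decoder this package is internal, so the snippet is illustrative only and the field names are taken from the diff:

    e, _ := json.NewEncoder("") // empty indent selects compact single-line output
    e.StartObject()
    _ = e.WriteName("count") // WriteName only errors on invalid UTF-8
    e.WriteInt(42)
    _ = e.WriteName("ratio")
    e.WriteFloat(0.5, 64)
    e.EndObject()
    out := e.Bytes() // {"count":42,"ratio":0.5}, modulo a nondeterministic detrand space after the comma
    _ = out
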
+func AppendString(b []byte, s string) []byte { + return appendString(b, s, false) +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go index f90e909b3..fbcd34920 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.13 // +build !go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go index dc05f4191..5e72f1cde 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.13 // +build go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go index a72995f02..bda8e8cf3 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !protolegacy // +build !protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go index 772e2f0e4..6d8d9bd6b 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build protolegacy // +build protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go index 2706bb67f..4b15493f2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.12 // +build !go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go index 1533ef600..0b31b66ea 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.12 // +build go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go index 90705e3ae..145c577bd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index e118af1e2..757642e23 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index 949dc49a6..c65b0325c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -18,6 +18,7 @@ import ( ) var errDecode = errors.New("cannot parse invalid wire-format data") +var errRecursionDepth = errors.New("exceeded maximum recursion depth") type unmarshalOptions struct { flags protoiface.UnmarshalInputFlags @@ -25,6 +26,7 @@ type unmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + depth int } func (o unmarshalOptions) Options() proto.UnmarshalOptions { @@ -44,6 +46,7 @@ func (o unmarshalOptions) IsDefault() bool { var lazyUnmarshalOptions = unmarshalOptions{ resolver: preg.GlobalTypes, + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -62,6 +65,7 @@ func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutp out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, + depth: in.Depth, }) var flags piface.UnmarshalOutputFlags if out.initialized { @@ -82,6 +86,10 @@ var errUnknown = errors.New("unknown") func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { mi.init() + opts.depth-- + if opts.depth < 0 { + return out, errRecursionDepth + } if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 3759b010c..029feeefd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -440,6 +440,13 @@ func legacyMerge(in piface.MergeInput) piface.MergeOutput { if !ok { return piface.MergeOutput{} } + if !in.Source.IsValid() { + // Legacy Marshal methods may not function on nil messages. + // Check for a typed nil source only after we confirm that + // legacy Marshal/Unmarshal methods are present, for + // consistency. 
+ return piface.MergeOutput{Flags: piface.MergeComplete} + } b, err := marshaler.Marshal() if err != nil { return piface.MergeOutput{} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 9e3ed821e..4c491bdf4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 9ecf23a85..ee0e0573e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go index 85e074c97..a1f6f3338 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index 2160c7019..56a8a4ed3 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 5879131da..3d40d5249 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 26 + Minor = 28 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 49f9b8c88..11bf7173b 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -42,18 +42,25 @@ type UnmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). 
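
For callers of the public proto package, the new RecursionLimit field is opt-in but defaulted: a zero value now means protowire.DefaultRecursionLimit rather than unbounded depth. A minimal sketch (wireBytes, msg, and the limit of 64 are illustrative):

    // wireBytes is a []byte of wire-format data; msg is a mutable proto.Message.
    if err := proto.UnmarshalOptions{RecursionLimit: 64}.Unmarshal(wireBytes, msg); err != nil {
            // nesting beyond 64 levels (or any other decode failure) is reported here
    }
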
func Unmarshal(b []byte, m Message) error { - _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) + _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } _, err := o.unmarshal(b, m.ProtoReflect()) return err } @@ -63,6 +70,9 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // This method permits fine-grained control over the unmarshaler. // Most users should use Unmarshal instead. func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } return o.unmarshal(in.Buf, in.Message) } @@ -86,12 +96,17 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto Message: m, Buf: b, Resolver: o.Resolver, + Depth: o.RecursionLimit, } if o.DiscardUnknown { in.Flags |= protoiface.UnmarshalDiscardUnknown } out, err = methods.Unmarshal(in) } else { + o.RecursionLimit-- + if o.RecursionLimit < 0 { + return out, errors.New("exceeded max recursion depth") + } err = o.unmarshalMessageSlow(b, m) } if err != nil { diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go index d8dd604f6..465e057b3 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_methods.go +++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. +//go:build !protoreflect // +build !protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go index b103d4320..494d6ceef 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_reflect.go +++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. +//go:build protoreflect // +build protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index 6be5d16e9..d5d5af6eb 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -53,6 +53,7 @@ type ( FindExtensionByName(field FullName) (ExtensionType, error) FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) } + Depth int } unmarshalOutput = struct { pragma.NoUnkeyedLiterals diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 918e685e1..7ced876f4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build purego || appengine // +build purego appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 5a3414724..eb7764c30 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -41,6 +41,31 @@ import ( // Converting to/from a Value and a concrete Go value panics on type mismatch. // For example, ValueOf("hello").Int() panics because this attempts to // retrieve an int64 from a string. +// +// List, Map, and Message Values are called "composite" values. +// +// A composite Value may alias (reference) memory at some location, +// such that changes to the Value updates the that location. +// A composite value acquired with a Mutable method, such as Message.Mutable, +// always references the source object. +// +// For example: +// // Append a 0 to a "repeated int32" field. +// // Since the Value returned by Mutable is guaranteed to alias +// // the source message, modifying the Value modifies the message. +// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) +// +// // Assign [0] to a "repeated int32" field by creating a new Value, +// // modifying it, and assigning it. +// list := message.NewField(fieldDesc).(List) +// list.Append(protoreflect.ValueOfInt32(0)) +// message.Set(fieldDesc, list) +// // ERROR: Since it is not defined whether Set aliases the source, +// // appending to the List here may or may not modify the message. +// list.Append(protoreflect.ValueOfInt32(0)) +// +// Some operations, such as Message.Get, may return an "empty, read-only" +// composite Value. Modifying an empty, read-only value panics. type Value value // The protoreflect API uses a custom Value union type instead of interface{} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go index c45debdca..702ddf22a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 66dcbcd0d..59f024c44 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -94,7 +94,8 @@ type Files struct { // Note that enum values are in the top-level since that are in the same // scope as the parent enum. 
descsByName map[protoreflect.FullName]interface{} - filesByPath map[string]protoreflect.FileDescriptor + filesByPath map[string][]protoreflect.FileDescriptor + numFiles int } type packageDescriptor struct { @@ -117,17 +118,16 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { r.descsByName = map[protoreflect.FullName]interface{}{ "": &packageDescriptor{}, } - r.filesByPath = make(map[string]protoreflect.FileDescriptor) + r.filesByPath = make(map[string][]protoreflect.FileDescriptor) } path := file.Path() - if prev := r.filesByPath[path]; prev != nil { + if prev := r.filesByPath[path]; len(prev) > 0 { r.checkGenProtoConflict(path) err := errors.New("file %q is already registered", file.Path()) - err = amendErrorWithCaller(err, prev, file) - if r == GlobalFiles && ignoreConflict(file, err) { - err = nil + err = amendErrorWithCaller(err, prev[0], file) + if !(r == GlobalFiles && ignoreConflict(file, err)) { + return err } - return err } for name := file.Package(); name != ""; name = name.Parent() { @@ -168,7 +168,8 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { r.descsByName[d.FullName()] = d }) - r.filesByPath[path] = file + r.filesByPath[path] = append(r.filesByPath[path], file) + r.numFiles++ return nil } @@ -308,6 +309,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // // This returns (nil, NotFound) if not found. +// This returns an error if multiple files have the same path. func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { return nil, NotFound @@ -316,13 +318,19 @@ func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) globalMutex.RLock() defer globalMutex.RUnlock() } - if fd, ok := r.filesByPath[path]; ok { - return fd, nil + fds := r.filesByPath[path] + switch len(fds) { + case 0: + return nil, NotFound + case 1: + return fds[0], nil + default: + return nil, errors.New("multiple files named %q", path) } - return nil, NotFound } -// NumFiles reports the number of registered files. +// NumFiles reports the number of registered files, +// including duplicate files with the same name. func (r *Files) NumFiles() int { if r == nil { return 0 @@ -331,10 +339,11 @@ func (r *Files) NumFiles() int { globalMutex.RLock() defer globalMutex.RUnlock() } - return len(r.filesByPath) + return r.numFiles } // RangeFiles iterates over all registered files while f returns true. +// If multiple files have the same name, RangeFiles iterates over all of them. // The iteration order is undefined. 
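
Since FindFileByPath now refuses to choose between same-path duplicates, code that needs every registered descriptor can fall back to RangeFiles; a small sketch against the global registry, using only the signatures visible in this diff:

    // Visits every registered file, including any same-path duplicates
    // the registry now retains.
    protoregistry.GlobalFiles.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
            fmt.Println(fd.Path())
            return true // keep iterating
    })
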
func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { if r == nil { @@ -344,9 +353,11 @@ func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { globalMutex.RLock() defer globalMutex.RUnlock() } - for _, file := range r.filesByPath { - if !f(file) { - return + for _, files := range r.filesByPath { + for _, file := range files { + if !f(file) { + return + } } } } diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 32c04f67e..44cf467d8 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -103,6 +103,7 @@ type UnmarshalInput = struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + Depth int } // UnmarshalOutput is output from the Unmarshal method. diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index f77239fc3..abe4ab511 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -43,7 +43,6 @@ package descriptorpb import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoiface "google.golang.org/protobuf/runtime/protoiface" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" @@ -829,15 +828,6 @@ func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3} } -var extRange_ExtensionRangeOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*ExtensionRangeOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_ExtensionRangeOptions -} - func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -1520,15 +1510,6 @@ func (*FileOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10} } -var extRange_FileOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use FileOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*FileOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_FileOptions -} - func (x *FileOptions) GetJavaPackage() string { if x != nil && x.JavaPackage != nil { return *x.JavaPackage @@ -1776,15 +1757,6 @@ func (*MessageOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11} } -var extRange_MessageOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use MessageOptions.ProtoReflect.Descriptor.ExtensionRanges instead. 
-func (*MessageOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_MessageOptions -} - func (x *MessageOptions) GetMessageSetWireFormat() bool { if x != nil && x.MessageSetWireFormat != nil { return *x.MessageSetWireFormat @@ -1930,15 +1902,6 @@ func (*FieldOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12} } -var extRange_FieldOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use FieldOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*FieldOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_FieldOptions -} - func (x *FieldOptions) GetCtype() FieldOptions_CType { if x != nil && x.Ctype != nil { return *x.Ctype @@ -2030,15 +1993,6 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } -var extRange_OneofOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use OneofOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*OneofOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_OneofOptions -} - func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2101,15 +2055,6 @@ func (*EnumOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14} } -var extRange_EnumOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use EnumOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*EnumOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_EnumOptions -} - func (x *EnumOptions) GetAllowAlias() bool { if x != nil && x.AllowAlias != nil { return *x.AllowAlias @@ -2183,15 +2128,6 @@ func (*EnumValueOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15} } -var extRange_EnumValueOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*EnumValueOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_EnumValueOptions -} - func (x *EnumValueOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2258,15 +2194,6 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } -var extRange_ServiceOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*ServiceOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_ServiceOptions -} - func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2335,15 +2262,6 @@ func (*MethodOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17} } -var extRange_MethodOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use MethodOptions.ProtoReflect.Descriptor.ExtensionRanges instead. 
-func (*MethodOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_MethodOptions -} - func (x *MethodOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated diff --git a/vendor/modules.txt b/vendor/modules.txt index 44f18d5ce..6b38e8c5e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -59,8 +59,9 @@ github.com/go-graphite/carbonzipper/zipper/httpHeaders # github.com/go-graphite/go-whisper v0.0.0-20220504075317-5035f072cad3 ## explicit github.com/go-graphite/go-whisper -# github.com/go-graphite/protocol v1.0.1-0.20210605224534-ac8ad6ab1f97 +# github.com/go-graphite/protocol v1.0.1-0.20220623155218-0727176fe6f6 ## explicit; go 1.16 +github.com/go-graphite/protocol/carbonapi_v2_grpc github.com/go-graphite/protocol/carbonapi_v2_pb github.com/go-graphite/protocol/carbonapi_v3_pb # github.com/gogo/protobuf v1.3.2 @@ -71,9 +72,10 @@ github.com/gogo/protobuf/protoc-gen-gogo/descriptor # github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e ## explicit github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.5.0 +# github.com/golang/protobuf v1.5.2 ## explicit; go 1.9 github.com/golang/protobuf/internal/gengogrpc +github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto github.com/golang/protobuf/protoc-gen-go github.com/golang/protobuf/protoc-gen-go/descriptor @@ -85,7 +87,7 @@ github.com/golang/protobuf/ptypes/timestamp # github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db ## explicit github.com/golang/snappy -# github.com/google/go-cmp v0.5.5 +# github.com/google/go-cmp v0.5.6 ## explicit; go 1.8 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff @@ -167,7 +169,7 @@ github.com/rcrowley/go-metrics # github.com/sevlyar/go-daemon v0.1.6-0.20210508104436-15bb82c8ea3c ## explicit github.com/sevlyar/go-daemon -# github.com/stretchr/testify v1.6.1 +# github.com/stretchr/testify v1.7.0 ## explicit; go 1.13 github.com/stretchr/testify/assert # github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d @@ -360,8 +362,8 @@ google.golang.org/genproto/googleapis/pubsub/v1 google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.38.0 -## explicit; go 1.11 +# google.golang.org/grpc v1.47.0 +## explicit; go 1.14 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -372,6 +374,7 @@ google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials @@ -383,12 +386,14 @@ google.golang.org/grpc/credentials/alts/internal/handshaker google.golang.org/grpc/credentials/alts/internal/handshaker/service google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp google.golang.org/grpc/credentials/google +google.golang.org/grpc/credentials/insecure google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/buffer @@ -401,6 +406,7 @@ 
google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough @@ -420,16 +426,18 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.26.0 -## explicit; go 1.9 +# google.golang.org/protobuf v1.28.0 +## explicit; go 1.11 google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo google.golang.org/protobuf/compiler/protogen +google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand google.golang.org/protobuf/internal/encoding/defval +google.golang.org/protobuf/internal/encoding/json google.golang.org/protobuf/internal/encoding/messageset google.golang.org/protobuf/internal/encoding/tag google.golang.org/protobuf/internal/encoding/text From fa9d52a295ffbf41812f394112871e0b0b50cbac Mon Sep 17 00:00:00 2001 From: Emad Mohamadi Date: Wed, 29 Jun 2022 17:10:22 +0200 Subject: [PATCH 02/11] Add gRPC listener with render functionality --- carbon/app.go | 6 + carbon/config.go | 39 +++--- carbonserver/carbonserver.go | 26 ++++ carbonserver/render.go | 241 +++++++++++++++++++++++++++++++++-- go-carbon.conf.example | 6 + 5 files changed, 291 insertions(+), 27 deletions(-) diff --git a/carbon/app.go b/carbon/app.go index 1c3575e35..f5563f2fc 100644 --- a/carbon/app.go +++ b/carbon/app.go @@ -594,6 +594,12 @@ func (app *App) Start(version string) (err error) { return } + if conf.Carbonserver.Grpc.Enabled { + if err = carbonserver.ListenGRPC(conf.Carbonserver.Grpc.Listen); err != nil { + return + } + } + app.Carbonserver = carbonserver } /* CARBONSERVER end */ diff --git a/carbon/config.go b/carbon/config.go index 18c4bdf6c..71034930f 100644 --- a/carbon/config.go +++ b/carbon/config.go @@ -103,25 +103,26 @@ type tagsConfig struct { } type carbonserverConfig struct { - Listen string `toml:"listen"` - Enabled bool `toml:"enabled"` - ReadTimeout *Duration `toml:"read-timeout"` - IdleTimeout *Duration `toml:"idle-timeout"` - WriteTimeout *Duration `toml:"write-timeout"` - RequestTimeout *Duration `toml:"request-timeout"` - ScanFrequency *Duration `toml:"scan-frequency"` - QueryCacheEnabled bool `toml:"query-cache-enabled"` - QueryCacheSizeMB int `toml:"query-cache-size-mb"` - FindCacheEnabled bool `toml:"find-cache-enabled"` - Buckets int `toml:"buckets"` - MaxGlobs int `toml:"max-globs"` - FailOnMaxGlobs bool `toml:"fail-on-max-globs"` - EmptyResultOk bool `toml:"empty-result-ok"` - MetricsAsCounters bool `toml:"metrics-as-counters"` - TrigramIndex bool `toml:"trigram-index"` - InternalStatsDir string `toml:"internal-stats-dir"` - Percentiles []int `toml:"stats-percentiles"` - CacheScan bool `toml:"cache-scan"` + Listen string `toml:"listen"` + Enabled bool `toml:"enabled"` + Grpc grpcConfig `toml:"grpc"` + ReadTimeout *Duration `toml:"read-timeout"` + IdleTimeout *Duration `toml:"idle-timeout"` + WriteTimeout *Duration `toml:"write-timeout"` + RequestTimeout *Duration `toml:"request-timeout"` + ScanFrequency *Duration `toml:"scan-frequency"` + QueryCacheEnabled bool `toml:"query-cache-enabled"` + QueryCacheSizeMB int 
`toml:"query-cache-size-mb"` + FindCacheEnabled bool `toml:"find-cache-enabled"` + Buckets int `toml:"buckets"` + MaxGlobs int `toml:"max-globs"` + FailOnMaxGlobs bool `toml:"fail-on-max-globs"` + EmptyResultOk bool `toml:"empty-result-ok"` + MetricsAsCounters bool `toml:"metrics-as-counters"` + TrigramIndex bool `toml:"trigram-index"` + InternalStatsDir string `toml:"internal-stats-dir"` + Percentiles []int `toml:"stats-percentiles"` + CacheScan bool `toml:"cache-scan"` MaxMetricsGlobbed int `toml:"max-metrics-globbed"` MaxMetricsRendered int `toml:"max-metrics-rendered"` diff --git a/carbonserver/carbonserver.go b/carbonserver/carbonserver.go index 2a92297c8..964274c8a 100644 --- a/carbonserver/carbonserver.go +++ b/carbonserver/carbonserver.go @@ -22,6 +22,8 @@ import ( "encoding/json" "errors" "fmt" + "github.com/go-graphite/protocol/carbonapi_v2_grpc" + "google.golang.org/grpc" "io" "math" "net" @@ -208,6 +210,7 @@ func (q *queryCache) getQueryItem(k string, size uint64, expire int32) *QueryIte } type CarbonserverListener struct { + carbonapi_v2_grpc.UnimplementedCarbonV2Server helper.Stoppable cacheGet func(key string) []points.Point readTimeout time.Duration @@ -224,6 +227,7 @@ type CarbonserverListener struct { forceScanChan chan struct{} metricsAsCounters bool tcpListener *net.TCPListener + grpcListener *net.TCPListener logger *zap.Logger accessLogger *zap.Logger internalStatsDir string @@ -1566,6 +1570,7 @@ func (listener *CarbonserverListener) Stop() error { listener.db.Close() } listener.tcpListener.Close() + listener.grpcListener.Close() return nil } @@ -1976,3 +1981,24 @@ func NewApiPerPathRatelimiter(maxInflightRequests uint, timeout time.Duration) * timeout: timeout, } } + +func (listener *CarbonserverListener) ListenGRPC(listen string) error { + var err error + var grpcAddr *net.TCPAddr + grpcAddr, err = net.ResolveTCPAddr("tcp", listen) + if err != nil { + return err + } + + listener.grpcListener, err = net.ListenTCP("tcp", grpcAddr) + if err != nil { + return err + } + + var opts []grpc.ServerOption + + grpcServer := grpc.NewServer(opts...) 
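+	// Register this listener as the CarbonV2 service implementation and serve gRPC in the background.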
+ carbonapi_v2_grpc.RegisterCarbonV2Server(grpcServer, listener) + go grpcServer.Serve(listener.grpcListener) + return nil +} diff --git a/carbonserver/render.go b/carbonserver/render.go index 463906e5f..34ac60c4f 100644 --- a/carbonserver/render.go +++ b/carbonserver/render.go @@ -5,6 +5,9 @@ import ( "context" "encoding/json" "fmt" + "github.com/go-graphite/protocol/carbonapi_v2_grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "io/ioutil" "math" "net/http" @@ -91,6 +94,30 @@ func getFormat(req *http.Request) (responseFormat, error) { return formatCode, nil } +func getProtoV3Targets(request *protov3.MultiFetchRequest) map[timeRange][]target { + targets := make(map[timeRange][]target) + for _, t := range request.Metrics { + tr := timeRange{ + from: int32(t.StartTime), + until: int32(t.StopTime), + } + targets[tr] = append(targets[tr], target{Name: t.Name, PathExpression: t.PathExpression}) + } + return targets +} + +func getProtoV2Targets(request *protov2.MultiFetchRequest) map[timeRange][]target { + targets := make(map[timeRange][]target) + for _, t := range request.Metrics { + tr := timeRange{ + from: t.StartTime, + until: t.StopTime, + } + targets[tr] = append(targets[tr], target{Name: t.Name, PathExpression: t.Name}) + } + return targets +} + func getTargets(req *http.Request, format responseFormat) (map[timeRange][]target, error) { targets := make(map[timeRange][]target) @@ -106,14 +133,7 @@ func getTargets(req *http.Request, format responseFormat) (map[timeRange][]targe return targets, fmt.Errorf("invalid payload: %s", err.Error()) } - for _, t := range pv3Request.Metrics { - tr := timeRange{ - from: int32(t.StartTime), - until: int32(t.StopTime), - } - targets[tr] = append(targets[tr], target{Name: t.Name, PathExpression: t.PathExpression}) - } - + targets = getProtoV3Targets(&pv3Request) default: from, err := stringToInt32(req.FormValue("from")) if err != nil { @@ -333,6 +353,91 @@ func (listener *CarbonserverListener) fetchWithCache(ctx context.Context, logger return response, fromCache, err } +func getUniqueMetricNames(targets map[timeRange][]target) []string { + metricMap := make(map[string]bool) + + for _, ts := range targets { + for _, metric := range ts { + metricMap[metric.Name] = true + } + } + metricNames := make([]string, len(metricMap)) + i := 0 + for k := range metricMap { + metricNames[i] = k + i++ + } + return metricNames +} + +func (listener *CarbonserverListener) prepareDataProtoStream(ctx context.Context, targets map[timeRange][]target, expandedGlobs []globs, responseChan chan *protov2.FetchResponse) { + defer close(responseChan) + if len(expandedGlobs) == 0 { + return + } + metricGlobMap := make(map[string]globs) + for _, expandedGlob := range expandedGlobs { + metricGlobMap[strings.ReplaceAll(expandedGlob.Name, "/", ".")] = expandedGlob + } + + for tr, ts := range targets { + for _, metric := range ts { + select { + case <-ctx.Done(): + return + default: + } + fromTime := tr.from + untilTime := tr.until + + listener.logger.Debug("fetching data...") + if expandedResult, ok := metricGlobMap[metric.Name]; ok { + files, leafs := expandedResult.Files, expandedResult.Leafs + if len(files) > listener.maxMetricsRendered { + listener.accessLogger.Error( + "rendering too many metrics", + zap.Int("limit", listener.maxMetricsRendered), + zap.Int("target", len(files)), + ) + + files = files[:listener.maxMetricsRendered] + leafs = leafs[:listener.maxMetricsRendered] + } + + metricsCount := 0 + for i := range files { + if leafs[i] { + metricsCount++ + } 
+ } + listener.accessLogger.Debug("expandGlobs result", + zap.String("handler", "render"), + zap.String("action", "expandGlobs"), + zap.String("metric", metric.Name), + zap.Int("metrics_count", metricsCount), + zap.Int32("from", fromTime), + zap.Int32("until", untilTime), + ) + res, err := listener.fetchDataPB(metric.Name, files, leafs, fromTime, untilTime) + if err != nil { + atomic.AddUint64(&listener.metrics.RenderErrors, 1) + listener.accessLogger.Error("error while fetching the data", + zap.Error(err), + ) + continue + } + for i := range res.Metrics { + responseChan <- res.Metrics[i] + } + } else { + listener.accessLogger.Debug("expand globs returned an error", + zap.String("metricName", metric.Name)) + continue + } + } + } +} + func (listener *CarbonserverListener) prepareDataProto(ctx context.Context, logger *zap.Logger, format responseFormat, targets map[timeRange][]target) (fetchResponse, error) { contentType := "application/text" var b []byte @@ -568,3 +673,123 @@ func (listener *CarbonserverListener) fetchDataPB(metric string, files []string, } return &multi, nil } + +func (listener *CarbonserverListener) Render(req *protov2.MultiFetchRequest, stream carbonapi_v2_grpc.CarbonV2_RenderServer) (rpcErr error) { + t0 := time.Now() + ctx := context.Background() + + atomic.AddUint64(&listener.metrics.RenderRequests, 1) + + accessLogger := TraceContextToZap(ctx, listener.accessLogger.With( + zap.String("handler", "renderStream"), + zap.String("request_payload", req.String()), + )) + + format := protoV2Format + accessLogger = accessLogger.With( + zap.String("format", format.String()), + ) + + targets := getProtoV2Targets(req) + tgs := getTargetNames(targets) + accessLogger = accessLogger.With( + zap.Strings("targets", tgs), + ) + + logger := TraceContextToZap(ctx, listener.accessLogger.With( + zap.String("handler", "render"), + zap.String("request_payload", req.String()), + zap.Strings("targets", tgs), + zap.String("format", format.String()), + )) + + // Make sure we log which metric caused a panic() + defer func() { + if r := recover(); r != nil { + logger.Error("panic recovered", + zap.Stack("stack"), + zap.Any("error", r), + ) + accessLogger.Error("fetch failed", + zap.Duration("runtime_seconds", time.Since(t0)), + zap.String("reason", "panic during serving the request"), + zap.Stack("stack"), + zap.Any("error", r), + zap.Int("http_code", http.StatusInternalServerError), + ) + rpcErr = status.New(codes.Internal, "Panic occured, see logs for more information").Err() + } + }() + + // TODO: implementing cache? + fromCache := false + + // TODO: chan buffer size should be configurable + responseChan := make(chan *protov2.FetchResponse, 1000) + + metricNames := getUniqueMetricNames(targets) + // TODO: pipeline? + expandedGlobs, err := listener.getExpandedGlobs(ctx, logger, time.Now(), metricNames) + if expandedGlobs == nil { + if err != nil { + return status.New(codes.InvalidArgument, err.Error()).Err() + } + } + + go listener.prepareDataProtoStream(ctx, targets, expandedGlobs, responseChan) + + metricsFetched := 0 + valuesFetched := 0 + // TODO: configurable? 
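+	// When internal stats tracking is enabled, collect the names of served
+	// metrics and update their access times in batches of metricAccessBatchSize.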
+ metricAccessBatchSize := 100 + metricAccessBatch := make([]string, 0, metricAccessBatchSize) + for renderResponse := range responseChan { + err := stream.Send(renderResponse) + if err != nil { + atomic.AddUint64(&listener.metrics.RenderErrors, 1) + accessLogger.Error("fetch failed", + zap.Duration("runtime_seconds", time.Since(t0)), + zap.String("reason", "stream send failed"), + zap.Error(err), + ) + return err + } + metricsFetched++ + valuesFetched += len(renderResponse.Values) + if listener.internalStatsDir != "" { + metricAccessBatch = append(metricAccessBatch, renderResponse.Name) + if len(metricAccessBatch) >= metricAccessBatchSize { + listener.UpdateMetricsAccessTimesByRequest(metricAccessBatch) + metricAccessBatch = metricAccessBatch[:0] + } + } + } + + if metricsFetched == 0 && !listener.emptyResultOk { + atomic.AddUint64(&listener.metrics.RenderErrors, 1) + accessLogger.Error("fetch failed", + zap.Duration("runtime_seconds", time.Since(t0)), + zap.String("reason", "no metrics found"), + zap.Error(err), + ) + return status.New(codes.NotFound, "no metrics found").Err() + } + + logger.Info("fetch served", + zap.Duration("runtime_seconds", time.Since(t0)), + zap.Bool("query_cache_enabled", listener.queryCacheEnabled), + zap.Bool("from_cache", fromCache), + zap.Int("metrics_fetched", metricsFetched), + zap.Int("values_fetched", valuesFetched), + zap.Int("http_code", http.StatusOK), + ) + + span := trace.SpanFromContext(ctx) + span.SetAttributes( + kv.Bool("graphite.from_cache", fromCache), + kv.Int("graphite.metrics", metricsFetched), + kv.Int("graphite.values", valuesFetched), + ) + + return nil +} diff --git a/go-carbon.conf.example b/go-carbon.conf.example index a254d846f..eec80ceed 100644 --- a/go-carbon.conf.example +++ b/go-carbon.conf.example @@ -426,6 +426,12 @@ stats-percentiles = [99, 98, 95, 75, 50] # path = "/metrics/list_query/" # max-inflight-requests = 3 +# carbonserver.grpc is the configuration for listening for grpc clients. 
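+# Currently only the Render RPC of the CarbonV2 service is implemented;
+# the remaining methods respond with the gRPC Unimplemented status.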
+# [carbonserver.grpc] +# enabled = true +# listen = ":7004" + + [dump] # Enable dump/restore function on USR2 signal enabled = false From 1d22d4c79b61abc5e0b93ee9804491311031da11 Mon Sep 17 00:00:00 2001 From: Emad Mohamadi Date: Wed, 29 Jun 2022 18:31:54 +0200 Subject: [PATCH 03/11] Add peer data to grpc render logs --- carbonserver/render.go | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/carbonserver/render.go b/carbonserver/render.go index 34ac60c4f..443e52e0a 100644 --- a/carbonserver/render.go +++ b/carbonserver/render.go @@ -7,6 +7,7 @@ import ( "fmt" "github.com/go-graphite/protocol/carbonapi_v2_grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" "google.golang.org/grpc/status" "io/ioutil" "math" @@ -676,31 +677,37 @@ func (listener *CarbonserverListener) fetchDataPB(metric string, files []string, func (listener *CarbonserverListener) Render(req *protov2.MultiFetchRequest, stream carbonapi_v2_grpc.CarbonV2_RenderServer) (rpcErr error) { t0 := time.Now() - ctx := context.Background() + ctx := stream.Context() + + var reqPeer string + p, ok := peer.FromContext(stream.Context()) + if ok { + reqPeer = p.Addr.String() + } atomic.AddUint64(&listener.metrics.RenderRequests, 1) accessLogger := TraceContextToZap(ctx, listener.accessLogger.With( - zap.String("handler", "renderStream"), + zap.String("handler", "grpc-render"), zap.String("request_payload", req.String()), + zap.String("peer", reqPeer), )) format := protoV2Format - accessLogger = accessLogger.With( - zap.String("format", format.String()), - ) targets := getProtoV2Targets(req) tgs := getTargetNames(targets) accessLogger = accessLogger.With( + zap.String("format", format.String()), zap.Strings("targets", tgs), ) - logger := TraceContextToZap(ctx, listener.accessLogger.With( - zap.String("handler", "render"), + logger := TraceContextToZap(ctx, listener.logger.With( + zap.String("handler", "grpc-render"), zap.String("request_payload", req.String()), zap.Strings("targets", tgs), zap.String("format", format.String()), + zap.String("peer", reqPeer), )) // Make sure we log which metric caused a panic() From 423a4f2e3f6d826cca65e1d487b152850a3004c5 Mon Sep 17 00:00:00 2001 From: Emad Mohamadi Date: Thu, 30 Jun 2022 16:04:32 +0200 Subject: [PATCH 04/11] Catchup with protocol without unnecessary streams --- go.mod | 2 +- go.sum | 2 + .../carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go | 54 +++--- .../carbonapi_v2_grpc/carbonapi_v2_grpc.proto | 4 +- .../carbonapi_v2_grpc_vtproto.pb.go | 162 ++++++------------ .../carbonapi_v2_pb/carbonapi_v2_pb.pb.go | 161 ++++++----------- .../carbonapi_v2_pb/carbonapi_v2_pb.proto | 4 - .../carbonapi_v2_pb_vtproto.pb.go | 151 ---------------- vendor/modules.txt | 2 +- 9 files changed, 136 insertions(+), 406 deletions(-) diff --git a/go.mod b/go.mod index 2ce596598..f24fb2906 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/dgryski/httputil v0.0.0-20160116060654-189c2918cd08 github.com/go-graphite/carbonzipper v0.0.0-20180329125635-fedce067a794 github.com/go-graphite/go-whisper v0.0.0-20220504075317-5035f072cad3 - github.com/go-graphite/protocol v1.0.1-0.20220623155218-0727176fe6f6 + github.com/go-graphite/protocol v1.0.1-0.20220630134049-92705234d05d github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect github.com/google/go-cmp v0.5.6 diff --git a/go.sum b/go.sum index 99737f282..991172639 100644 --- a/go.sum +++ b/go.sum @@ -90,6 +90,8 @@ github.com/go-graphite/go-whisper 
v0.0.0-20220504075317-5035f072cad3 h1:QlegEOON github.com/go-graphite/go-whisper v0.0.0-20220504075317-5035f072cad3/go.mod h1:XnU8q9X8C3OhD8R7gAlRLI6VXU2KMwqgTO9Tshub2qE= github.com/go-graphite/protocol v1.0.1-0.20220623155218-0727176fe6f6 h1:o6x5biB/GOeCgdxgr1JrDHzKkj7jVLH1Mx/P293BLb0= github.com/go-graphite/protocol v1.0.1-0.20220623155218-0727176fe6f6/go.mod h1:puWkW2DFZi46CaLY1rxYhkDiBDFlpaTDaMjcE/Cd9gw= +github.com/go-graphite/protocol v1.0.1-0.20220630134049-92705234d05d h1:f9YLmzZytXwaYpFDMMqoEj6OmWQc/WOenF/gwgsHB0s= +github.com/go-graphite/protocol v1.0.1-0.20220630134049-92705234d05d/go.mod h1:puWkW2DFZi46CaLY1rxYhkDiBDFlpaTDaMjcE/Cd9gw= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go index 4c34853e1..72e40575c 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go @@ -33,39 +33,39 @@ var file_carbonapi_v2_grpc_proto_rawDesc = []byte{ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x32, 0xcb, 0x03, 0x0a, 0x08, 0x43, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x56, 0x32, 0x12, 0x50, 0x0a, + 0x32, 0xcc, 0x03, 0x0a, 0x08, 0x43, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x56, 0x32, 0x12, 0x50, 0x0a, 0x06, 0x52, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x46, 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, - 0x47, 0x0a, 0x04, 0x46, 0x69, 0x6e, 0x64, 0x12, 0x1c, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, + 0x45, 0x0a, 0x04, 0x46, 0x69, 0x6e, 0x64, 0x12, 0x1c, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x43, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, - 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1f, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, - 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x45, 0x0a, - 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, - 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 
0x1d, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, - 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, - 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x4d, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x1a, 0x28, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, - 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x33, - 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2d, - 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x74, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x67, - 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x24, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, + 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, + 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, + 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, + 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, + 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x4d, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x28, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, + 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, + 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x2d, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x74, 0x65, 0x2f, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, + 0x67, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_carbonapi_v2_grpc_proto_goTypes = []interface{}{ @@ -75,7 +75,7 @@ var file_carbonapi_v2_grpc_proto_goTypes = []interface{}{ (*carbonapi_v2_pb.InfoRequest)(nil), // 3: carbonapi_v2_pb.InfoRequest (*carbonapi_v2_pb.FetchResponse)(nil), // 4: carbonapi_v2_pb.FetchResponse (*carbonapi_v2_pb.GlobResponse)(nil), // 5: carbonapi_v2_pb.GlobResponse - (*carbonapi_v2_pb.MetricResponse)(nil), // 6: carbonapi_v2_pb.MetricResponse + (*carbonapi_v2_pb.ListMetricsResponse)(nil), // 6: carbonapi_v2_pb.ListMetricsResponse (*carbonapi_v2_pb.InfoResponse)(nil), // 7: carbonapi_v2_pb.InfoResponse (*carbonapi_v2_pb.MetricDetailsResponse)(nil), // 8: carbonapi_v2_pb.MetricDetailsResponse (*carbonapi_v2_pb.ProtocolVersionResponse)(nil), // 9: carbonapi_v2_pb.ProtocolVersionResponse @@ -89,7 +89,7 @@ var file_carbonapi_v2_grpc_proto_depIdxs = []int32{ 2, // 5: carbonapi_v2_grpc.CarbonV2.Version:input_type -> google.protobuf.Empty 4, // 6: carbonapi_v2_grpc.CarbonV2.Render:output_type -> carbonapi_v2_pb.FetchResponse 5, // 7: carbonapi_v2_grpc.CarbonV2.Find:output_type -> carbonapi_v2_pb.GlobResponse - 6, // 8: carbonapi_v2_grpc.CarbonV2.List:output_type -> carbonapi_v2_pb.MetricResponse + 6, // 8: carbonapi_v2_grpc.CarbonV2.List:output_type -> carbonapi_v2_pb.ListMetricsResponse 7, // 9: carbonapi_v2_grpc.CarbonV2.Info:output_type -> carbonapi_v2_pb.InfoResponse 8, // 10: carbonapi_v2_grpc.CarbonV2.Stats:output_type -> carbonapi_v2_pb.MetricDetailsResponse 9, // 11: carbonapi_v2_grpc.CarbonV2.Version:output_type -> carbonapi_v2_pb.ProtocolVersionResponse diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto index 55aff7cc2..9d8ce75a6 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto @@ -8,8 +8,8 @@ import "github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.proto"; service CarbonV2 { rpc Render(carbonapi_v2_pb.MultiFetchRequest) returns (stream carbonapi_v2_pb.FetchResponse) {} - rpc Find(carbonapi_v2_pb.GlobRequest) returns (stream carbonapi_v2_pb.GlobResponse) {} - rpc List(google.protobuf.Empty) returns (stream carbonapi_v2_pb.MetricResponse) {} + rpc Find(carbonapi_v2_pb.GlobRequest) returns (carbonapi_v2_pb.GlobResponse) {} + rpc List(google.protobuf.Empty) returns (carbonapi_v2_pb.ListMetricsResponse) {} rpc Info(carbonapi_v2_pb.InfoRequest) returns (carbonapi_v2_pb.InfoResponse) {} rpc Stats(google.protobuf.Empty) returns (carbonapi_v2_pb.MetricDetailsResponse) {} rpc Version(google.protobuf.Empty) returns (carbonapi_v2_pb.ProtocolVersionResponse) {} diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go index fd79a9f13..98edf4700 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go @@ -31,8 +31,8 @@ const _ = grpc.SupportPackageIsVersion7 // For semantics around ctx use and closing/ending streaming RPCs, please refer to 
https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type CarbonV2Client interface { Render(ctx context.Context, in *carbonapi_v2_pb.MultiFetchRequest, opts ...grpc.CallOption) (CarbonV2_RenderClient, error) - Find(ctx context.Context, in *carbonapi_v2_pb.GlobRequest, opts ...grpc.CallOption) (CarbonV2_FindClient, error) - List(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (CarbonV2_ListClient, error) + Find(ctx context.Context, in *carbonapi_v2_pb.GlobRequest, opts ...grpc.CallOption) (*carbonapi_v2_pb.GlobResponse, error) + List(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*carbonapi_v2_pb.ListMetricsResponse, error) Info(ctx context.Context, in *carbonapi_v2_pb.InfoRequest, opts ...grpc.CallOption) (*carbonapi_v2_pb.InfoResponse, error) Stats(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*carbonapi_v2_pb.MetricDetailsResponse, error) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*carbonapi_v2_pb.ProtocolVersionResponse, error) @@ -78,68 +78,22 @@ func (x *carbonV2RenderClient) Recv() (*carbonapi_v2_pb.FetchResponse, error) { return m, nil } -func (c *carbonV2Client) Find(ctx context.Context, in *carbonapi_v2_pb.GlobRequest, opts ...grpc.CallOption) (CarbonV2_FindClient, error) { - stream, err := c.cc.NewStream(ctx, &CarbonV2_ServiceDesc.Streams[1], "/carbonapi_v2_grpc.CarbonV2/Find", opts...) +func (c *carbonV2Client) Find(ctx context.Context, in *carbonapi_v2_pb.GlobRequest, opts ...grpc.CallOption) (*carbonapi_v2_pb.GlobResponse, error) { + out := new(carbonapi_v2_pb.GlobResponse) + err := c.cc.Invoke(ctx, "/carbonapi_v2_grpc.CarbonV2/Find", in, out, opts...) if err != nil { return nil, err } - x := &carbonV2FindClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type CarbonV2_FindClient interface { - Recv() (*carbonapi_v2_pb.GlobResponse, error) - grpc.ClientStream -} - -type carbonV2FindClient struct { - grpc.ClientStream -} - -func (x *carbonV2FindClient) Recv() (*carbonapi_v2_pb.GlobResponse, error) { - m := new(carbonapi_v2_pb.GlobResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil + return out, nil } -func (c *carbonV2Client) List(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (CarbonV2_ListClient, error) { - stream, err := c.cc.NewStream(ctx, &CarbonV2_ServiceDesc.Streams[2], "/carbonapi_v2_grpc.CarbonV2/List", opts...) +func (c *carbonV2Client) List(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*carbonapi_v2_pb.ListMetricsResponse, error) { + out := new(carbonapi_v2_pb.ListMetricsResponse) + err := c.cc.Invoke(ctx, "/carbonapi_v2_grpc.CarbonV2/List", in, out, opts...) 
if err != nil { return nil, err } - x := &carbonV2ListClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type CarbonV2_ListClient interface { - Recv() (*carbonapi_v2_pb.MetricResponse, error) - grpc.ClientStream -} - -type carbonV2ListClient struct { - grpc.ClientStream -} - -func (x *carbonV2ListClient) Recv() (*carbonapi_v2_pb.MetricResponse, error) { - m := new(carbonapi_v2_pb.MetricResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil + return out, nil } func (c *carbonV2Client) Info(ctx context.Context, in *carbonapi_v2_pb.InfoRequest, opts ...grpc.CallOption) (*carbonapi_v2_pb.InfoResponse, error) { @@ -174,8 +128,8 @@ func (c *carbonV2Client) Version(ctx context.Context, in *emptypb.Empty, opts .. // for forward compatibility type CarbonV2Server interface { Render(*carbonapi_v2_pb.MultiFetchRequest, CarbonV2_RenderServer) error - Find(*carbonapi_v2_pb.GlobRequest, CarbonV2_FindServer) error - List(*emptypb.Empty, CarbonV2_ListServer) error + Find(context.Context, *carbonapi_v2_pb.GlobRequest) (*carbonapi_v2_pb.GlobResponse, error) + List(context.Context, *emptypb.Empty) (*carbonapi_v2_pb.ListMetricsResponse, error) Info(context.Context, *carbonapi_v2_pb.InfoRequest) (*carbonapi_v2_pb.InfoResponse, error) Stats(context.Context, *emptypb.Empty) (*carbonapi_v2_pb.MetricDetailsResponse, error) Version(context.Context, *emptypb.Empty) (*carbonapi_v2_pb.ProtocolVersionResponse, error) @@ -189,11 +143,11 @@ type UnimplementedCarbonV2Server struct { func (UnimplementedCarbonV2Server) Render(*carbonapi_v2_pb.MultiFetchRequest, CarbonV2_RenderServer) error { return status.Errorf(codes.Unimplemented, "method Render not implemented") } -func (UnimplementedCarbonV2Server) Find(*carbonapi_v2_pb.GlobRequest, CarbonV2_FindServer) error { - return status.Errorf(codes.Unimplemented, "method Find not implemented") +func (UnimplementedCarbonV2Server) Find(context.Context, *carbonapi_v2_pb.GlobRequest) (*carbonapi_v2_pb.GlobResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Find not implemented") } -func (UnimplementedCarbonV2Server) List(*emptypb.Empty, CarbonV2_ListServer) error { - return status.Errorf(codes.Unimplemented, "method List not implemented") +func (UnimplementedCarbonV2Server) List(context.Context, *emptypb.Empty) (*carbonapi_v2_pb.ListMetricsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") } func (UnimplementedCarbonV2Server) Info(context.Context, *carbonapi_v2_pb.InfoRequest) (*carbonapi_v2_pb.InfoResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") @@ -238,46 +192,40 @@ func (x *carbonV2RenderServer) Send(m *carbonapi_v2_pb.FetchResponse) error { return x.ServerStream.SendMsg(m) } -func _CarbonV2_Find_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(carbonapi_v2_pb.GlobRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func _CarbonV2_Find_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(carbonapi_v2_pb.GlobRequest) + if err := dec(in); err != nil { + return nil, err } - return srv.(CarbonV2Server).Find(m, &carbonV2FindServer{stream}) -} - -type CarbonV2_FindServer interface { - Send(*carbonapi_v2_pb.GlobResponse) error - grpc.ServerStream -} - -type 
carbonV2FindServer struct { - grpc.ServerStream -} - -func (x *carbonV2FindServer) Send(m *carbonapi_v2_pb.GlobResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _CarbonV2_List_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(emptypb.Empty) - if err := stream.RecvMsg(m); err != nil { - return err + if interceptor == nil { + return srv.(CarbonV2Server).Find(ctx, in) } - return srv.(CarbonV2Server).List(m, &carbonV2ListServer{stream}) -} - -type CarbonV2_ListServer interface { - Send(*carbonapi_v2_pb.MetricResponse) error - grpc.ServerStream -} - -type carbonV2ListServer struct { - grpc.ServerStream + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/carbonapi_v2_grpc.CarbonV2/Find", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CarbonV2Server).Find(ctx, req.(*carbonapi_v2_pb.GlobRequest)) + } + return interceptor(ctx, in, info, handler) } -func (x *carbonV2ListServer) Send(m *carbonapi_v2_pb.MetricResponse) error { - return x.ServerStream.SendMsg(m) +func _CarbonV2_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CarbonV2Server).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/carbonapi_v2_grpc.CarbonV2/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CarbonV2Server).List(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) } func _CarbonV2_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -341,6 +289,14 @@ var CarbonV2_ServiceDesc = grpc.ServiceDesc{ ServiceName: "carbonapi_v2_grpc.CarbonV2", HandlerType: (*CarbonV2Server)(nil), Methods: []grpc.MethodDesc{ + { + MethodName: "Find", + Handler: _CarbonV2_Find_Handler, + }, + { + MethodName: "List", + Handler: _CarbonV2_List_Handler, + }, { MethodName: "Info", Handler: _CarbonV2_Info_Handler, @@ -360,16 +316,6 @@ var CarbonV2_ServiceDesc = grpc.ServiceDesc{ Handler: _CarbonV2_Render_Handler, ServerStreams: true, }, - { - StreamName: "Find", - Handler: _CarbonV2_Find_Handler, - ServerStreams: true, - }, - { - StreamName: "List", - Handler: _CarbonV2_List_Handler, - ServerStreams: true, - }, }, Metadata: "carbonapi_v2_grpc.proto", } diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.pb.go index f98f0e776..e73c67b9c 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.pb.go +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.pb.go @@ -751,53 +751,6 @@ func (x *ListMetricsResponse) GetMetrics() []string { return nil } -type MetricResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *MetricResponse) Reset() { - *x = MetricResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MetricResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func 
(*MetricResponse) ProtoMessage() {} - -func (x *MetricResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MetricResponse.ProtoReflect.Descriptor instead. -func (*MetricResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{13} -} - -func (x *MetricResponse) GetName() string { - if x != nil { - return x.Name - } - return "" -} - // MetricDetails and MetricDetailsResponse is not guaranteed to stay the same in future releases // But we'll do our best to make them stable and only to extend them if that's necessary type MetricDetails struct { @@ -814,7 +767,7 @@ type MetricDetails struct { func (x *MetricDetails) Reset() { *x = MetricDetails{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[14] + mi := &file_carbonapi_v2_pb_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -827,7 +780,7 @@ func (x *MetricDetails) String() string { func (*MetricDetails) ProtoMessage() {} func (x *MetricDetails) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[14] + mi := &file_carbonapi_v2_pb_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -840,7 +793,7 @@ func (x *MetricDetails) ProtoReflect() protoreflect.Message { // Deprecated: Use MetricDetails.ProtoReflect.Descriptor instead. func (*MetricDetails) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{14} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{13} } func (x *MetricDetails) GetSize() int64 { @@ -884,7 +837,7 @@ type MetricDetailsResponse struct { func (x *MetricDetailsResponse) Reset() { *x = MetricDetailsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[15] + mi := &file_carbonapi_v2_pb_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -897,7 +850,7 @@ func (x *MetricDetailsResponse) String() string { func (*MetricDetailsResponse) ProtoMessage() {} func (x *MetricDetailsResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[15] + mi := &file_carbonapi_v2_pb_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -910,7 +863,7 @@ func (x *MetricDetailsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MetricDetailsResponse.ProtoReflect.Descriptor instead. 
func (*MetricDetailsResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{15} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{14} } func (x *MetricDetailsResponse) GetMetrics() map[string]*MetricDetails { @@ -945,7 +898,7 @@ type ProtocolVersionResponse struct { func (x *ProtocolVersionResponse) Reset() { *x = ProtocolVersionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[16] + mi := &file_carbonapi_v2_pb_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -958,7 +911,7 @@ func (x *ProtocolVersionResponse) String() string { func (*ProtocolVersionResponse) ProtoMessage() {} func (x *ProtocolVersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[16] + mi := &file_carbonapi_v2_pb_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -971,7 +924,7 @@ func (x *ProtocolVersionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtocolVersionResponse.ProtoReflect.Descriptor instead. func (*ProtocolVersionResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{16} + return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{15} } func (x *ProtocolVersionResponse) GetVersion() string { @@ -1061,39 +1014,36 @@ var file_carbonapi_v2_pb_proto_rawDesc = []byte{ 0x2f, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x22, 0x24, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x6b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x4d, - 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x4d, 0x6f, - 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x41, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x41, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x52, - 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x52, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x22, 0x80, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, - 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, - 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, - 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x1c, 0x0a, 0x09, - 0x46, 0x72, 0x65, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x09, 0x46, 0x72, 0x65, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x54, 
0x6f, - 0x74, 0x61, 0x6c, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, - 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x70, 0x61, 0x63, 0x65, 0x1a, 0x5a, 0x0a, 0x0c, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x61, - 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x33, 0x0a, 0x17, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x31, 0x5a, 0x2f, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2d, 0x67, 0x72, 0x61, - 0x70, 0x68, 0x69, 0x74, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x63, - 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x22, 0x6b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x41, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x41, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x52, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x52, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x80, 0x02, + 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, + 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x46, 0x72, 0x65, 0x65, 0x53, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x46, 0x72, 0x65, 0x65, 0x53, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, + 0x70, 0x61, 0x63, 0x65, 0x1a, 0x5a, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 
0x63, 0x44, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x33, 0x0a, 0x17, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2d, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x74, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, + 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1108,7 +1058,7 @@ func file_carbonapi_v2_pb_proto_rawDescGZIP() []byte { return file_carbonapi_v2_pb_proto_rawDescData } -var file_carbonapi_v2_pb_proto_msgTypes = make([]protoimpl.MessageInfo, 18) +var file_carbonapi_v2_pb_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_carbonapi_v2_pb_proto_goTypes = []interface{}{ (*FetchRequest)(nil), // 0: carbonapi_v2_pb.FetchRequest (*MultiFetchRequest)(nil), // 1: carbonapi_v2_pb.MultiFetchRequest @@ -1123,11 +1073,10 @@ var file_carbonapi_v2_pb_proto_goTypes = []interface{}{ (*ServerInfoResponse)(nil), // 10: carbonapi_v2_pb.ServerInfoResponse (*ZipperInfoResponse)(nil), // 11: carbonapi_v2_pb.ZipperInfoResponse (*ListMetricsResponse)(nil), // 12: carbonapi_v2_pb.ListMetricsResponse - (*MetricResponse)(nil), // 13: carbonapi_v2_pb.MetricResponse - (*MetricDetails)(nil), // 14: carbonapi_v2_pb.MetricDetails - (*MetricDetailsResponse)(nil), // 15: carbonapi_v2_pb.MetricDetailsResponse - (*ProtocolVersionResponse)(nil), // 16: carbonapi_v2_pb.ProtocolVersionResponse - nil, // 17: carbonapi_v2_pb.MetricDetailsResponse.MetricsEntry + (*MetricDetails)(nil), // 13: carbonapi_v2_pb.MetricDetails + (*MetricDetailsResponse)(nil), // 14: carbonapi_v2_pb.MetricDetailsResponse + (*ProtocolVersionResponse)(nil), // 15: carbonapi_v2_pb.ProtocolVersionResponse + nil, // 16: carbonapi_v2_pb.MetricDetailsResponse.MetricsEntry } var file_carbonapi_v2_pb_proto_depIdxs = []int32{ 0, // 0: carbonapi_v2_pb.MultiFetchRequest.metrics:type_name -> carbonapi_v2_pb.FetchRequest @@ -1136,8 +1085,8 @@ var file_carbonapi_v2_pb_proto_depIdxs = []int32{ 8, // 3: carbonapi_v2_pb.InfoResponse.retentions:type_name -> carbonapi_v2_pb.Retention 9, // 4: carbonapi_v2_pb.ServerInfoResponse.info:type_name -> carbonapi_v2_pb.InfoResponse 10, // 5: carbonapi_v2_pb.ZipperInfoResponse.responses:type_name -> carbonapi_v2_pb.ServerInfoResponse - 17, // 6: carbonapi_v2_pb.MetricDetailsResponse.metrics:type_name -> carbonapi_v2_pb.MetricDetailsResponse.MetricsEntry - 14, // 7: carbonapi_v2_pb.MetricDetailsResponse.MetricsEntry.value:type_name -> carbonapi_v2_pb.MetricDetails + 16, // 6: carbonapi_v2_pb.MetricDetailsResponse.metrics:type_name -> carbonapi_v2_pb.MetricDetailsResponse.MetricsEntry + 13, // 7: carbonapi_v2_pb.MetricDetailsResponse.MetricsEntry.value:type_name -> carbonapi_v2_pb.MetricDetails 8, // [8:8] is the sub-list for method output_type 8, // [8:8] is the sub-list for method input_type 8, // [8:8] is the sub-list for extension type_name @@ -1308,18 +1257,6 @@ func file_carbonapi_v2_pb_proto_init() { } } file_carbonapi_v2_pb_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetricResponse); i { - case 0: - return &v.state - case 
1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_carbonapi_v2_pb_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MetricDetails); i { case 0: return &v.state @@ -1331,7 +1268,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MetricDetailsResponse); i { case 0: return &v.state @@ -1343,7 +1280,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProtocolVersionResponse); i { case 0: return &v.state @@ -1362,7 +1299,7 @@ func file_carbonapi_v2_pb_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_carbonapi_v2_pb_proto_rawDesc, NumEnums: 0, - NumMessages: 18, + NumMessages: 17, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.proto b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.proto index cac4b6431..179aa5d2f 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.proto +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.proto @@ -70,10 +70,6 @@ message ListMetricsResponse { repeated string Metrics = 1; } -message MetricResponse { - string name = 1; -} - // MetricDetails and MetricDetailsResponse is not guaranteed to stay the same in future releases // But we'll do our best to make them stable and only to extend them if that's necessary message MetricDetails { diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb_vtproto.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb_vtproto.pb.go index feed513cf..6e379d40b 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb_vtproto.pb.go +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb_vtproto.pb.go @@ -261,18 +261,6 @@ func (this *ListMetricsResponse) EqualVT(that *ListMetricsResponse) bool { return string(this.unknownFields) == string(that.unknownFields) } -func (this *MetricResponse) EqualVT(that *MetricResponse) bool { - if this == nil { - return that == nil || fmt.Sprintf("%v", that) == "" - } else if that == nil { - return fmt.Sprintf("%v", this) == "" - } - if this.Name != that.Name { - return false - } - return string(this.unknownFields) == string(that.unknownFields) -} - func (this *MetricDetails) EqualVT(that *MetricDetails) bool { if this == nil { return that == nil || fmt.Sprintf("%v", that) == "" @@ -979,46 +967,6 @@ func (m *ListMetricsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *MetricResponse) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *MetricResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i 
:= len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *MetricDetails) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -1456,22 +1404,6 @@ func (m *ListMetricsResponse) SizeVT() (n int) { return n } -func (m *MetricResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.unknownFields != nil { - n += len(m.unknownFields) - } - return n -} - func (m *MetricDetails) SizeVT() (n int) { if m == nil { return 0 @@ -3045,89 +2977,6 @@ func (m *ListMetricsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *MetricResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *MetricDetails) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/modules.txt b/vendor/modules.txt index 6b38e8c5e..0e726ff11 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -59,7 +59,7 @@ github.com/go-graphite/carbonzipper/zipper/httpHeaders # github.com/go-graphite/go-whisper v0.0.0-20220504075317-5035f072cad3 ## explicit github.com/go-graphite/go-whisper -# github.com/go-graphite/protocol v1.0.1-0.20220623155218-0727176fe6f6 +# github.com/go-graphite/protocol v1.0.1-0.20220630134049-92705234d05d ## explicit; go 1.16 github.com/go-graphite/protocol/carbonapi_v2_grpc github.com/go-graphite/protocol/carbonapi_v2_pb From 1accaf422b32cf32bf6cc8701cbde500aa8a1038 Mon Sep 17 00:00:00 2001 From: Emad Mohamadi Date: Mon, 4 Jul 2022 13:04:58 +0200 Subject: [PATCH 05/11] Fix lint errors --- api/sample/cache-query/cache-query.go | 3 ++- carbonserver/carbonserver.go | 8 ++++---- carbonserver/render.go | 13 +++++++------ receiver/pubsub/pubsub_test.go | 3 ++- 4 files changed, 15 insertions(+), 12 deletions(-) diff --git a/api/sample/cache-query/cache-query.go b/api/sample/cache-query/cache-query.go index 0f2b7daad..6e41fa922 100644 --- a/api/sample/cache-query/cache-query.go +++ b/api/sample/cache-query/cache-query.go @@ -10,6 +10,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "github.com/go-graphite/go-carbon/helper/carbonpb" ) @@ -20,7 +21,7 @@ func main() { flag.Parse() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - conn, err := grpc.DialContext(ctx, *server, grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.DialContext(ctx, *server, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/carbonserver/carbonserver.go b/carbonserver/carbonserver.go index 964274c8a..53edc7a09 100644 --- a/carbonserver/carbonserver.go +++ b/carbonserver/carbonserver.go @@ -22,8 +22,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/go-graphite/protocol/carbonapi_v2_grpc" - "google.golang.org/grpc" "io" "math" "net" @@ -52,11 +50,13 @@ import ( "github.com/go-graphite/go-carbon/helper" "github.com/go-graphite/go-carbon/helper/stat" "github.com/go-graphite/go-carbon/points" + grpcv2 "github.com/go-graphite/protocol/carbonapi_v2_grpc" protov3 "github.com/go-graphite/protocol/carbonapi_v3_pb" "github.com/lomik/zapwriter" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/filter" "github.com/syndtr/goleveldb/leveldb/opt" + "google.golang.org/grpc" ) type metricStruct struct { @@ -210,7 +210,7 @@ func (q *queryCache) getQueryItem(k string, size uint64, expire int32) *QueryIte } type CarbonserverListener struct { - carbonapi_v2_grpc.UnimplementedCarbonV2Server + grpcv2.UnimplementedCarbonV2Server helper.Stoppable cacheGet func(key string) []points.Point readTimeout time.Duration @@ -1998,7 +1998,7 @@ func (listener *CarbonserverListener) ListenGRPC(listen string) error { var opts []grpc.ServerOption grpcServer := grpc.NewServer(opts...) 
- carbonapi_v2_grpc.RegisterCarbonV2Server(grpcServer, listener) + grpcv2.RegisterCarbonV2Server(grpcServer, listener) go grpcServer.Serve(listener.grpcListener) return nil } diff --git a/carbonserver/render.go b/carbonserver/render.go index 443e52e0a..4b9486603 100644 --- a/carbonserver/render.go +++ b/carbonserver/render.go @@ -5,10 +5,6 @@ import ( "context" "encoding/json" "fmt" - "github.com/go-graphite/protocol/carbonapi_v2_grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/status" "io/ioutil" "math" "net/http" @@ -19,8 +15,12 @@ import ( "time" "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" "github.com/go-graphite/carbonzipper/zipper/httpHeaders" + grpcv2 "github.com/go-graphite/protocol/carbonapi_v2_grpc" protov2 "github.com/go-graphite/protocol/carbonapi_v2_pb" protov3 "github.com/go-graphite/protocol/carbonapi_v3_pb" pickle "github.com/lomik/og-rek" @@ -675,7 +675,8 @@ func (listener *CarbonserverListener) fetchDataPB(metric string, files []string, return &multi, nil } -func (listener *CarbonserverListener) Render(req *protov2.MultiFetchRequest, stream carbonapi_v2_grpc.CarbonV2_RenderServer) (rpcErr error) { +// Render implements Render rpc of CarbonV2 gRPC service +func (listener *CarbonserverListener) Render(req *protov2.MultiFetchRequest, stream grpcv2.CarbonV2_RenderServer) (rpcErr error) { t0 := time.Now() ctx := stream.Context() @@ -731,7 +732,7 @@ func (listener *CarbonserverListener) Render(req *protov2.MultiFetchRequest, str // TODO: implementing cache? fromCache := false - // TODO: chan buffer size should be configurable + // TODO: should chan buffer size be configurable? responseChan := make(chan *protov2.FetchResponse, 1000) metricNames := getUniqueMetricNames(targets) diff --git a/receiver/pubsub/pubsub_test.go b/receiver/pubsub/pubsub_test.go index c793c6326..46bbe579d 100644 --- a/receiver/pubsub/pubsub_test.go +++ b/receiver/pubsub/pubsub_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/assert" "google.golang.org/api/option" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) var ( @@ -24,7 +25,7 @@ func newTestClient() (*pstest.Server, *pubsub.Topic, *pubsub.Client, error) { ctx := context.Background() srv := pstest.NewServer() srv.SetStreamTimeout(500 * time.Millisecond) - conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + conn, err := grpc.Dial(srv.Addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, nil, nil, err } From 7a86d01c035fcaf79353312841594882208c1a31 Mon Sep 17 00:00:00 2001 From: Emad Mohamadi Date: Wed, 13 Jul 2022 18:38:39 +0200 Subject: [PATCH 06/11] Use fixed paths protocol --- go.mod | 2 +- go.sum | 6 +- .../carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go | 113 ++++++----- .../carbonapi_v2_grpc/carbonapi_v2_grpc.proto | 2 +- .../carbonapi_v2_grpc_vtproto.pb.go | 4 +- .../protocol/carbonapi_v2_grpc/gen.go | 3 - .../carbonapi_v2_pb/carbonapi_v2_pb.pb.go | 179 +++++++++--------- .../carbonapi_v2_pb_vtproto.pb.go | 2 +- vendor/modules.txt | 2 +- 9 files changed, 154 insertions(+), 159 deletions(-) diff --git a/go.mod b/go.mod index f24fb2906..b8979d56e 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/dgryski/httputil v0.0.0-20160116060654-189c2918cd08 github.com/go-graphite/carbonzipper v0.0.0-20180329125635-fedce067a794 github.com/go-graphite/go-whisper v0.0.0-20220504075317-5035f072cad3 - github.com/go-graphite/protocol 
v1.0.1-0.20220630134049-92705234d05d + github.com/go-graphite/protocol v1.0.1-0.20220713140022-0180df7ab791 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect github.com/google/go-cmp v0.5.6 diff --git a/go.sum b/go.sum index 991172639..b57178834 100644 --- a/go.sum +++ b/go.sum @@ -88,10 +88,8 @@ github.com/go-graphite/carbonzipper v0.0.0-20180329125635-fedce067a794 h1:9N+1I8 github.com/go-graphite/carbonzipper v0.0.0-20180329125635-fedce067a794/go.mod h1:sfZ+AkP8/bBcWSGVqhseV6t6e/+C0i/PkTpA32W5rXs= github.com/go-graphite/go-whisper v0.0.0-20220504075317-5035f072cad3 h1:QlegEOONT3wkw4c8/dIwGeAahnHonvJ1MaUUzPviZxk= github.com/go-graphite/go-whisper v0.0.0-20220504075317-5035f072cad3/go.mod h1:XnU8q9X8C3OhD8R7gAlRLI6VXU2KMwqgTO9Tshub2qE= -github.com/go-graphite/protocol v1.0.1-0.20220623155218-0727176fe6f6 h1:o6x5biB/GOeCgdxgr1JrDHzKkj7jVLH1Mx/P293BLb0= -github.com/go-graphite/protocol v1.0.1-0.20220623155218-0727176fe6f6/go.mod h1:puWkW2DFZi46CaLY1rxYhkDiBDFlpaTDaMjcE/Cd9gw= -github.com/go-graphite/protocol v1.0.1-0.20220630134049-92705234d05d h1:f9YLmzZytXwaYpFDMMqoEj6OmWQc/WOenF/gwgsHB0s= -github.com/go-graphite/protocol v1.0.1-0.20220630134049-92705234d05d/go.mod h1:puWkW2DFZi46CaLY1rxYhkDiBDFlpaTDaMjcE/Cd9gw= +github.com/go-graphite/protocol v1.0.1-0.20220713140022-0180df7ab791 h1:W2oVuFO138fYSMWIzDlHlL/4y6OSWFEPAbi55WQkOFY= +github.com/go-graphite/protocol v1.0.1-0.20220713140022-0180df7ab791/go.mod h1:puWkW2DFZi46CaLY1rxYhkDiBDFlpaTDaMjcE/Cd9gw= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go index 72e40575c..b43da29e5 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.0 -// protoc v3.15.8 -// source: carbonapi_v2_grpc.proto +// protoc v3.19.4 +// source: carbonapi_v2_grpc/carbonapi_v2_grpc.proto package carbonapi_v2_grpc @@ -21,54 +21,53 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -var File_carbonapi_v2_grpc_proto protoreflect.FileDescriptor +var File_carbonapi_v2_grpc_carbonapi_v2_grpc_proto protoreflect.FileDescriptor -var file_carbonapi_v2_grpc_proto_rawDesc = []byte{ - 0x0a, 0x17, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, 0x61, 0x72, 0x62, 0x6f, - 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x1a, 0x1b, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, - 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2d, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x74, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, - 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, - 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x32, 0xcc, 0x03, 0x0a, 0x08, 0x43, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x56, 0x32, 0x12, 0x50, 0x0a, - 0x06, 0x52, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, - 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x46, - 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x61, - 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x65, - 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, - 0x45, 0x0a, 0x04, 0x46, 0x69, 0x6e, 0x64, 0x12, 0x1c, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, +var file_carbonapi_v2_grpc_carbonapi_v2_grpc_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x67, + 0x72, 0x70, 0x63, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, + 0x5f, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, 0x61, 0x72, + 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x1a, 0x1b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x63, 0x61, 0x72, + 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2f, 0x63, 0x61, 0x72, + 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x32, 0xcc, 0x03, 0x0a, 0x08, 0x43, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x56, 0x32, 0x12, + 0x50, 0x0a, 0x06, 0x52, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x63, 0x61, 0x72, 0x62, + 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x75, 0x6c, 0x74, + 0x69, 0x46, 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, + 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, + 0x46, 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, + 0x01, 0x12, 0x45, 0x0a, 0x04, 0x46, 0x69, 0x6e, 0x64, 0x12, 0x1c, 
0x2e, 0x63, 0x61, 0x72, 0x62, + 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, - 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x24, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, - 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, - 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, - 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, - 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, - 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x4d, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x28, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, - 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, - 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x2d, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x74, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, - 0x67, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, + 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x24, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, + 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x45, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, + 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, + 0x70, 0x69, 0x5f, 
0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, + 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x28, 0x2e, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x6f, 0x2d, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x74, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, + 0x32, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -var file_carbonapi_v2_grpc_proto_goTypes = []interface{}{ +var file_carbonapi_v2_grpc_carbonapi_v2_grpc_proto_goTypes = []interface{}{ (*carbonapi_v2_pb.MultiFetchRequest)(nil), // 0: carbonapi_v2_pb.MultiFetchRequest (*carbonapi_v2_pb.GlobRequest)(nil), // 1: carbonapi_v2_pb.GlobRequest (*emptypb.Empty)(nil), // 2: google.protobuf.Empty @@ -80,7 +79,7 @@ var file_carbonapi_v2_grpc_proto_goTypes = []interface{}{ (*carbonapi_v2_pb.MetricDetailsResponse)(nil), // 8: carbonapi_v2_pb.MetricDetailsResponse (*carbonapi_v2_pb.ProtocolVersionResponse)(nil), // 9: carbonapi_v2_pb.ProtocolVersionResponse } -var file_carbonapi_v2_grpc_proto_depIdxs = []int32{ +var file_carbonapi_v2_grpc_carbonapi_v2_grpc_proto_depIdxs = []int32{ 0, // 0: carbonapi_v2_grpc.CarbonV2.Render:input_type -> carbonapi_v2_pb.MultiFetchRequest 1, // 1: carbonapi_v2_grpc.CarbonV2.Find:input_type -> carbonapi_v2_pb.GlobRequest 2, // 2: carbonapi_v2_grpc.CarbonV2.List:input_type -> google.protobuf.Empty @@ -100,26 +99,26 @@ var file_carbonapi_v2_grpc_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for field type_name } -func init() { file_carbonapi_v2_grpc_proto_init() } -func file_carbonapi_v2_grpc_proto_init() { - if File_carbonapi_v2_grpc_proto != nil { +func init() { file_carbonapi_v2_grpc_carbonapi_v2_grpc_proto_init() } +func file_carbonapi_v2_grpc_carbonapi_v2_grpc_proto_init() { + if File_carbonapi_v2_grpc_carbonapi_v2_grpc_proto != nil { return } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_carbonapi_v2_grpc_proto_rawDesc, + RawDescriptor: file_carbonapi_v2_grpc_carbonapi_v2_grpc_proto_rawDesc, NumEnums: 0, NumMessages: 0, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_carbonapi_v2_grpc_proto_goTypes, - DependencyIndexes: file_carbonapi_v2_grpc_proto_depIdxs, + GoTypes: file_carbonapi_v2_grpc_carbonapi_v2_grpc_proto_goTypes, + DependencyIndexes: file_carbonapi_v2_grpc_carbonapi_v2_grpc_proto_depIdxs, }.Build() - File_carbonapi_v2_grpc_proto = out.File - file_carbonapi_v2_grpc_proto_rawDesc = nil - 
file_carbonapi_v2_grpc_proto_goTypes = nil - file_carbonapi_v2_grpc_proto_depIdxs = nil + File_carbonapi_v2_grpc_carbonapi_v2_grpc_proto = out.File + file_carbonapi_v2_grpc_carbonapi_v2_grpc_proto_rawDesc = nil + file_carbonapi_v2_grpc_carbonapi_v2_grpc_proto_goTypes = nil + file_carbonapi_v2_grpc_carbonapi_v2_grpc_proto_depIdxs = nil } diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto index 9d8ce75a6..4b2522954 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto @@ -4,7 +4,7 @@ package carbonapi_v2_grpc; option go_package = "github.com/go-graphite/protocol/carbonapi_v2_grpc"; import "google/protobuf/empty.proto"; -import "github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.proto"; +import "carbonapi_v2_pb/carbonapi_v2_pb.proto"; service CarbonV2 { rpc Render(carbonapi_v2_pb.MultiFetchRequest) returns (stream carbonapi_v2_pb.FetchResponse) {} diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go index 98edf4700..54198c27f 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. // protoc-gen-go-vtproto version: v0.3.0 -// source: carbonapi_v2_grpc.proto +// source: carbonapi_v2_grpc/carbonapi_v2_grpc.proto package carbonapi_v2_grpc @@ -317,5 +317,5 @@ var CarbonV2_ServiceDesc = grpc.ServiceDesc{ ServerStreams: true, }, }, - Metadata: "carbonapi_v2_grpc.proto", + Metadata: "carbonapi_v2_grpc/carbonapi_v2_grpc.proto", } diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/gen.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/gen.go index 6ad8965fc..06ae32fa1 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/gen.go +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/gen.go @@ -1,12 +1,9 @@ package carbonapi_v2_grpc -//go:generate mkdir -p github.com/go-graphite/protocol/ -//go:generate cp -r ../carbonapi_v2_pb github.com/go-graphite/protocol/ //go:generate protoc --go_out=. --go-vtproto_out=. --go-vtproto_opt=features=all carbonapi_v2_grpc.proto --proto_path=. --proto_path=../ //go:generate mv github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.pb.go ./ //go:generate mv github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc_vtproto.pb.go ./ //go:generate rm -r github.com/go-graphite/protocol/carbonapi_v2_grpc -//go:generate rm -r github.com/go-graphite/protocol/carbonapi_v2_pb //go:generate rm -r github.com/go-graphite/protocol //go:generate rm -r github.com/go-graphite //go:generate rm -r github.com/ diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.pb.go index e73c67b9c..13f75bca7 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.pb.go +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.0 -// protoc v3.15.8 -// source: carbonapi_v2_pb.proto +// protoc v3.19.4 +// source: carbonapi_v2_pb/carbonapi_v2_pb.proto package carbonapi_v2_pb @@ -33,7 +33,7 @@ type FetchRequest struct { func (x *FetchRequest) Reset() { *x = FetchRequest{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[0] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -46,7 +46,7 @@ func (x *FetchRequest) String() string { func (*FetchRequest) ProtoMessage() {} func (x *FetchRequest) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[0] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -59,7 +59,7 @@ func (x *FetchRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FetchRequest.ProtoReflect.Descriptor instead. func (*FetchRequest) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{0} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{0} } func (x *FetchRequest) GetName() string { @@ -94,7 +94,7 @@ type MultiFetchRequest struct { func (x *MultiFetchRequest) Reset() { *x = MultiFetchRequest{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[1] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -107,7 +107,7 @@ func (x *MultiFetchRequest) String() string { func (*MultiFetchRequest) ProtoMessage() {} func (x *MultiFetchRequest) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[1] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -120,7 +120,7 @@ func (x *MultiFetchRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MultiFetchRequest.ProtoReflect.Descriptor instead. func (*MultiFetchRequest) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{1} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{1} } func (x *MultiFetchRequest) GetMetrics() []*FetchRequest { @@ -146,7 +146,7 @@ type FetchResponse struct { func (x *FetchResponse) Reset() { *x = FetchResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[2] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -159,7 +159,7 @@ func (x *FetchResponse) String() string { func (*FetchResponse) ProtoMessage() {} func (x *FetchResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[2] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -172,7 +172,7 @@ func (x *FetchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FetchResponse.ProtoReflect.Descriptor instead. 
func (*FetchResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{2} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{2} } func (x *FetchResponse) GetName() string { @@ -228,7 +228,7 @@ type MultiFetchResponse struct { func (x *MultiFetchResponse) Reset() { *x = MultiFetchResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[3] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -241,7 +241,7 @@ func (x *MultiFetchResponse) String() string { func (*MultiFetchResponse) ProtoMessage() {} func (x *MultiFetchResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[3] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -254,7 +254,7 @@ func (x *MultiFetchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MultiFetchResponse.ProtoReflect.Descriptor instead. func (*MultiFetchResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{3} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{3} } func (x *MultiFetchResponse) GetMetrics() []*FetchResponse { @@ -275,7 +275,7 @@ type GlobRequest struct { func (x *GlobRequest) Reset() { *x = GlobRequest{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[4] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -288,7 +288,7 @@ func (x *GlobRequest) String() string { func (*GlobRequest) ProtoMessage() {} func (x *GlobRequest) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[4] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -301,7 +301,7 @@ func (x *GlobRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobRequest.ProtoReflect.Descriptor instead. func (*GlobRequest) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{4} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{4} } func (x *GlobRequest) GetQuery() string { @@ -323,7 +323,7 @@ type GlobMatch struct { func (x *GlobMatch) Reset() { *x = GlobMatch{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[5] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -336,7 +336,7 @@ func (x *GlobMatch) String() string { func (*GlobMatch) ProtoMessage() {} func (x *GlobMatch) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[5] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -349,7 +349,7 @@ func (x *GlobMatch) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobMatch.ProtoReflect.Descriptor instead. 
func (*GlobMatch) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{5} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{5} } func (x *GlobMatch) GetPath() string { @@ -378,7 +378,7 @@ type GlobResponse struct { func (x *GlobResponse) Reset() { *x = GlobResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[6] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -391,7 +391,7 @@ func (x *GlobResponse) String() string { func (*GlobResponse) ProtoMessage() {} func (x *GlobResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[6] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -404,7 +404,7 @@ func (x *GlobResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobResponse.ProtoReflect.Descriptor instead. func (*GlobResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{6} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{6} } func (x *GlobResponse) GetName() string { @@ -432,7 +432,7 @@ type InfoRequest struct { func (x *InfoRequest) Reset() { *x = InfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[7] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -445,7 +445,7 @@ func (x *InfoRequest) String() string { func (*InfoRequest) ProtoMessage() {} func (x *InfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[7] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -458,7 +458,7 @@ func (x *InfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use InfoRequest.ProtoReflect.Descriptor instead. func (*InfoRequest) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{7} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{7} } func (x *InfoRequest) GetName() string { @@ -480,7 +480,7 @@ type Retention struct { func (x *Retention) Reset() { *x = Retention{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[8] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -493,7 +493,7 @@ func (x *Retention) String() string { func (*Retention) ProtoMessage() {} func (x *Retention) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[8] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -506,7 +506,7 @@ func (x *Retention) ProtoReflect() protoreflect.Message { // Deprecated: Use Retention.ProtoReflect.Descriptor instead. 
func (*Retention) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{8} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{8} } func (x *Retention) GetSecondsPerPoint() int32 { @@ -538,7 +538,7 @@ type InfoResponse struct { func (x *InfoResponse) Reset() { *x = InfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[9] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -551,7 +551,7 @@ func (x *InfoResponse) String() string { func (*InfoResponse) ProtoMessage() {} func (x *InfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[9] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -564,7 +564,7 @@ func (x *InfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use InfoResponse.ProtoReflect.Descriptor instead. func (*InfoResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{9} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{9} } func (x *InfoResponse) GetName() string { @@ -614,7 +614,7 @@ type ServerInfoResponse struct { func (x *ServerInfoResponse) Reset() { *x = ServerInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[10] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -627,7 +627,7 @@ func (x *ServerInfoResponse) String() string { func (*ServerInfoResponse) ProtoMessage() {} func (x *ServerInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[10] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -640,7 +640,7 @@ func (x *ServerInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerInfoResponse.ProtoReflect.Descriptor instead. func (*ServerInfoResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{10} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{10} } func (x *ServerInfoResponse) GetServer() string { @@ -668,7 +668,7 @@ type ZipperInfoResponse struct { func (x *ZipperInfoResponse) Reset() { *x = ZipperInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[11] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -681,7 +681,7 @@ func (x *ZipperInfoResponse) String() string { func (*ZipperInfoResponse) ProtoMessage() {} func (x *ZipperInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[11] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -694,7 +694,7 @@ func (x *ZipperInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ZipperInfoResponse.ProtoReflect.Descriptor instead. 
func (*ZipperInfoResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{11} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{11} } func (x *ZipperInfoResponse) GetResponses() []*ServerInfoResponse { @@ -715,7 +715,7 @@ type ListMetricsResponse struct { func (x *ListMetricsResponse) Reset() { *x = ListMetricsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[12] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -728,7 +728,7 @@ func (x *ListMetricsResponse) String() string { func (*ListMetricsResponse) ProtoMessage() {} func (x *ListMetricsResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[12] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -741,7 +741,7 @@ func (x *ListMetricsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListMetricsResponse.ProtoReflect.Descriptor instead. func (*ListMetricsResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{12} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{12} } func (x *ListMetricsResponse) GetMetrics() []string { @@ -767,7 +767,7 @@ type MetricDetails struct { func (x *MetricDetails) Reset() { *x = MetricDetails{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[13] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -780,7 +780,7 @@ func (x *MetricDetails) String() string { func (*MetricDetails) ProtoMessage() {} func (x *MetricDetails) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[13] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -793,7 +793,7 @@ func (x *MetricDetails) ProtoReflect() protoreflect.Message { // Deprecated: Use MetricDetails.ProtoReflect.Descriptor instead. func (*MetricDetails) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{13} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{13} } func (x *MetricDetails) GetSize() int64 { @@ -837,7 +837,7 @@ type MetricDetailsResponse struct { func (x *MetricDetailsResponse) Reset() { *x = MetricDetailsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[14] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -850,7 +850,7 @@ func (x *MetricDetailsResponse) String() string { func (*MetricDetailsResponse) ProtoMessage() {} func (x *MetricDetailsResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[14] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -863,7 +863,7 @@ func (x *MetricDetailsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MetricDetailsResponse.ProtoReflect.Descriptor instead. 
func (*MetricDetailsResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{14} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{14} } func (x *MetricDetailsResponse) GetMetrics() map[string]*MetricDetails { @@ -898,7 +898,7 @@ type ProtocolVersionResponse struct { func (x *ProtocolVersionResponse) Reset() { *x = ProtocolVersionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_carbonapi_v2_pb_proto_msgTypes[15] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -911,7 +911,7 @@ func (x *ProtocolVersionResponse) String() string { func (*ProtocolVersionResponse) ProtoMessage() {} func (x *ProtocolVersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_carbonapi_v2_pb_proto_msgTypes[15] + mi := &file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -924,7 +924,7 @@ func (x *ProtocolVersionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtocolVersionResponse.ProtoReflect.Descriptor instead. func (*ProtocolVersionResponse) Descriptor() ([]byte, []int) { - return file_carbonapi_v2_pb_proto_rawDescGZIP(), []int{15} + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP(), []int{15} } func (x *ProtocolVersionResponse) GetVersion() string { @@ -934,10 +934,11 @@ func (x *ProtocolVersionResponse) GetVersion() string { return "" } -var File_carbonapi_v2_pb_proto protoreflect.FileDescriptor +var File_carbonapi_v2_pb_carbonapi_v2_pb_proto protoreflect.FileDescriptor -var file_carbonapi_v2_pb_proto_rawDesc = []byte{ - 0x0a, 0x15, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, +var file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, + 0x62, 0x2f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x61, 0x72, 0x62, 0x6f, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x5f, 0x70, 0x62, 0x22, 0x5c, 0x0a, 0x0c, 0x46, 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, @@ -1047,19 +1048,19 @@ var file_carbonapi_v2_pb_proto_rawDesc = []byte{ } var ( - file_carbonapi_v2_pb_proto_rawDescOnce sync.Once - file_carbonapi_v2_pb_proto_rawDescData = file_carbonapi_v2_pb_proto_rawDesc + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescOnce sync.Once + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescData = file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDesc ) -func file_carbonapi_v2_pb_proto_rawDescGZIP() []byte { - file_carbonapi_v2_pb_proto_rawDescOnce.Do(func() { - file_carbonapi_v2_pb_proto_rawDescData = protoimpl.X.CompressGZIP(file_carbonapi_v2_pb_proto_rawDescData) +func file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescGZIP() []byte { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescOnce.Do(func() { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescData = protoimpl.X.CompressGZIP(file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescData) }) - return file_carbonapi_v2_pb_proto_rawDescData + return file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDescData } -var file_carbonapi_v2_pb_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_carbonapi_v2_pb_proto_goTypes = 
[]interface{}{ +var file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_carbonapi_v2_pb_carbonapi_v2_pb_proto_goTypes = []interface{}{ (*FetchRequest)(nil), // 0: carbonapi_v2_pb.FetchRequest (*MultiFetchRequest)(nil), // 1: carbonapi_v2_pb.MultiFetchRequest (*FetchResponse)(nil), // 2: carbonapi_v2_pb.FetchResponse @@ -1078,7 +1079,7 @@ var file_carbonapi_v2_pb_proto_goTypes = []interface{}{ (*ProtocolVersionResponse)(nil), // 15: carbonapi_v2_pb.ProtocolVersionResponse nil, // 16: carbonapi_v2_pb.MetricDetailsResponse.MetricsEntry } -var file_carbonapi_v2_pb_proto_depIdxs = []int32{ +var file_carbonapi_v2_pb_carbonapi_v2_pb_proto_depIdxs = []int32{ 0, // 0: carbonapi_v2_pb.MultiFetchRequest.metrics:type_name -> carbonapi_v2_pb.FetchRequest 2, // 1: carbonapi_v2_pb.MultiFetchResponse.metrics:type_name -> carbonapi_v2_pb.FetchResponse 5, // 2: carbonapi_v2_pb.GlobResponse.matches:type_name -> carbonapi_v2_pb.GlobMatch @@ -1094,13 +1095,13 @@ var file_carbonapi_v2_pb_proto_depIdxs = []int32{ 0, // [0:8] is the sub-list for field type_name } -func init() { file_carbonapi_v2_pb_proto_init() } -func file_carbonapi_v2_pb_proto_init() { - if File_carbonapi_v2_pb_proto != nil { +func init() { file_carbonapi_v2_pb_carbonapi_v2_pb_proto_init() } +func file_carbonapi_v2_pb_carbonapi_v2_pb_proto_init() { + if File_carbonapi_v2_pb_carbonapi_v2_pb_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_carbonapi_v2_pb_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FetchRequest); i { case 0: return &v.state @@ -1112,7 +1113,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MultiFetchRequest); i { case 0: return &v.state @@ -1124,7 +1125,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FetchResponse); i { case 0: return &v.state @@ -1136,7 +1137,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MultiFetchResponse); i { case 0: return &v.state @@ -1148,7 +1149,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GlobRequest); i { case 0: return &v.state @@ -1160,7 +1161,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GlobMatch); i { case 0: return &v.state @@ -1172,7 +1173,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[6].Exporter = 
func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GlobResponse); i { case 0: return &v.state @@ -1184,7 +1185,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*InfoRequest); i { case 0: return &v.state @@ -1196,7 +1197,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Retention); i { case 0: return &v.state @@ -1208,7 +1209,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*InfoResponse); i { case 0: return &v.state @@ -1220,7 +1221,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServerInfoResponse); i { case 0: return &v.state @@ -1232,7 +1233,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ZipperInfoResponse); i { case 0: return &v.state @@ -1244,7 +1245,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListMetricsResponse); i { case 0: return &v.state @@ -1256,7 +1257,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MetricDetails); i { case 0: return &v.state @@ -1268,7 +1269,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MetricDetailsResponse); i { case 0: return &v.state @@ -1280,7 +1281,7 @@ func file_carbonapi_v2_pb_proto_init() { return nil } } - file_carbonapi_v2_pb_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProtocolVersionResponse); i { case 0: return &v.state @@ -1297,18 +1298,18 @@ func file_carbonapi_v2_pb_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_carbonapi_v2_pb_proto_rawDesc, + RawDescriptor: 
file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDesc, NumEnums: 0, NumMessages: 17, NumExtensions: 0, NumServices: 0, }, - GoTypes: file_carbonapi_v2_pb_proto_goTypes, - DependencyIndexes: file_carbonapi_v2_pb_proto_depIdxs, - MessageInfos: file_carbonapi_v2_pb_proto_msgTypes, + GoTypes: file_carbonapi_v2_pb_carbonapi_v2_pb_proto_goTypes, + DependencyIndexes: file_carbonapi_v2_pb_carbonapi_v2_pb_proto_depIdxs, + MessageInfos: file_carbonapi_v2_pb_carbonapi_v2_pb_proto_msgTypes, }.Build() - File_carbonapi_v2_pb_proto = out.File - file_carbonapi_v2_pb_proto_rawDesc = nil - file_carbonapi_v2_pb_proto_goTypes = nil - file_carbonapi_v2_pb_proto_depIdxs = nil + File_carbonapi_v2_pb_carbonapi_v2_pb_proto = out.File + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_rawDesc = nil + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_goTypes = nil + file_carbonapi_v2_pb_carbonapi_v2_pb_proto_depIdxs = nil } diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb_vtproto.pb.go b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb_vtproto.pb.go index 6e379d40b..1c307fce2 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb_vtproto.pb.go +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_pb/carbonapi_v2_pb_vtproto.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. // protoc-gen-go-vtproto version: v0.3.0 -// source: carbonapi_v2_pb.proto +// source: carbonapi_v2_pb/carbonapi_v2_pb.proto package carbonapi_v2_pb diff --git a/vendor/modules.txt b/vendor/modules.txt index 0e726ff11..3021f6578 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -59,7 +59,7 @@ github.com/go-graphite/carbonzipper/zipper/httpHeaders # github.com/go-graphite/go-whisper v0.0.0-20220504075317-5035f072cad3 ## explicit github.com/go-graphite/go-whisper -# github.com/go-graphite/protocol v1.0.1-0.20220630134049-92705234d05d +# github.com/go-graphite/protocol v1.0.1-0.20220713140022-0180df7ab791 ## explicit; go 1.16 github.com/go-graphite/protocol/carbonapi_v2_grpc github.com/go-graphite/protocol/carbonapi_v2_pb From 3898ed23e7b9a4fb37d99907fbf5116614271362 Mon Sep 17 00:00:00 2001 From: Emad Mohamadi Date: Thu, 14 Jul 2022 16:51:32 +0200 Subject: [PATCH 07/11] Refactor prepareProto for stream --- carbonserver/carbonserver_test.go | 7 +- carbonserver/fetchsinglemetric.go | 18 ---- carbonserver/render.go | 169 ++++++++---------------------- 3 files changed, 47 insertions(+), 147 deletions(-) diff --git a/carbonserver/carbonserver_test.go b/carbonserver/carbonserver_test.go index f4f2d21b7..b7e4a650e 100644 --- a/carbonserver/carbonserver_test.go +++ b/carbonserver/carbonserver_test.go @@ -172,8 +172,11 @@ func generalFetchSingleMetricRemove(testData *FetchTest) { } func generalFetchSingleMetricHelper(testData *FetchTest, cache *cache.Cache, carbonserver *CarbonserverListener) (*pb.FetchResponse, error) { - data, err := carbonserver.fetchSingleMetricV2(testData.name, int32(testData.from), int32(testData.until)) - return data, err + data, err := carbonserver.fetchSingleMetric(testData.name, "", int32(testData.from), int32(testData.until)) + if err != nil { + return nil, err + } + return data.proto2(), nil } func testFetchSingleMetricHelper(testData *FetchTest, cache *cache.Cache, carbonserver *CarbonserverListener) (*pb.FetchResponse, error) { diff --git a/carbonserver/fetchsinglemetric.go b/carbonserver/fetchsinglemetric.go index 55c4e9bc9..451c9dc2e 100644 --- a/carbonserver/fetchsinglemetric.go +++ 
b/carbonserver/fetchsinglemetric.go @@ -157,21 +157,3 @@ func (listener *CarbonserverListener) fetchSingleMetric(metric string, pathExpre return response{}, err } } - -func (listener *CarbonserverListener) fetchSingleMetricV2(metric string, fromTime, untilTime int32) (*protov2.FetchResponse, error) { - resp, err := listener.fetchSingleMetric(metric, "", fromTime, untilTime) - if err != nil { - return nil, err - } - - return resp.proto2(), nil -} - -func (listener *CarbonserverListener) fetchSingleMetricV3(metric string, pathExpression string, fromTime, untilTime int32) (*protov3.FetchResponse, error) { - resp, err := listener.fetchSingleMetric(metric, pathExpression, fromTime, untilTime) - if err != nil { - return nil, err - } - - return resp.proto3(), nil -} diff --git a/carbonserver/render.go b/carbonserver/render.go index 4b9486603..6d0fd1537 100644 --- a/carbonserver/render.go +++ b/carbonserver/render.go @@ -356,7 +356,6 @@ func (listener *CarbonserverListener) fetchWithCache(ctx context.Context, logger func getUniqueMetricNames(targets map[timeRange][]target) []string { metricMap := make(map[string]bool) - for _, ts := range targets { for _, metric := range ts { metricMap[metric.Name] = true @@ -371,16 +370,16 @@ func getUniqueMetricNames(targets map[timeRange][]target) []string { return metricNames } -func (listener *CarbonserverListener) prepareDataProtoStream(ctx context.Context, targets map[timeRange][]target, expandedGlobs []globs, responseChan chan *protov2.FetchResponse) { - defer close(responseChan) - if len(expandedGlobs) == 0 { - return - } +func getMetricGlobMapFromExpandedGlobs(expandedGlobs []globs) map[string]globs { metricGlobMap := make(map[string]globs) for _, expandedGlob := range expandedGlobs { metricGlobMap[strings.ReplaceAll(expandedGlob.Name, "/", ".")] = expandedGlob } + return metricGlobMap +} +func (listener *CarbonserverListener) prepareDataStream(ctx context.Context, format responseFormat, targets map[timeRange][]target, metricGlobMap map[string]globs, responseChan chan response) { + defer close(responseChan) for tr, ts := range targets { for _, metric := range ts { select { @@ -419,7 +418,17 @@ func (listener *CarbonserverListener) prepareDataProtoStream(ctx context.Context zap.Int32("from", fromTime), zap.Int32("until", untilTime), ) - res, err := listener.fetchDataPB(metric.Name, files, leafs, fromTime, untilTime) + var res []response + var err error + if format == protoV2Format || format == jsonFormat { + res, err = listener.fetchData(metric.Name, "", files, leafs, fromTime, untilTime) + } else { + // FIXME: why should we pass metric name instead of path Expression and fill it in afterwards? 
+ res, err = listener.fetchData(metric.Name, metric.Name, files, leafs, fromTime, untilTime) + for i := range res { + res[i].PathExpression = metric.PathExpression + } + } if err != nil { atomic.AddUint64(&listener.metrics.RenderErrors, 1) listener.accessLogger.Error("error while fetching the data", @@ -427,8 +436,8 @@ func (listener *CarbonserverListener) prepareDataProtoStream(ctx context.Context ) continue } - for i := range res.Metrics { - responseChan <- res.Metrics[i] + for i := range res { + responseChan <- res[i] } } else { listener.accessLogger.Debug("expand globs returned an error", @@ -449,96 +458,25 @@ func (listener *CarbonserverListener) prepareDataProto(ctx context.Context, logg var multiv3 protov3.MultiFetchResponse var multiv2 protov2.MultiFetchResponse - metricMap := make(map[string]bool) - - for _, ts := range targets { - for _, metric := range ts { - metricMap[metric.Name] = true - } - } - metricNames := make([]string, len(metricMap)) - i := 0 - for k := range metricMap { - metricNames[i] = k - i++ - } + // TODO: should chan buffer size be configurable? + responseChan := make(chan response, 1000) + metricNames := getUniqueMetricNames(targets) + // TODO: pipeline? expandedGlobs, err := listener.getExpandedGlobs(ctx, logger, time.Now(), metricNames) - if expandedGlobs == nil { return fetchResponse{nil, contentType, 0, 0, 0, nil}, err } - - metricGlobMap := make(map[string]globs) - for _, expandedGlob := range expandedGlobs { - metricGlobMap[strings.ReplaceAll(expandedGlob.Name, "/", ".")] = expandedGlob - } + metricGlobMap := getMetricGlobMapFromExpandedGlobs(expandedGlobs) + go listener.prepareDataStream(ctx, format, targets, metricGlobMap, responseChan) var metrics []string - for tr, ts := range targets { - for _, metric := range ts { - fromTime := tr.from - untilTime := tr.until - - listener.logger.Debug("fetching data...") - if expandedResult, ok := metricGlobMap[metric.Name]; ok { - files, leafs := expandedResult.Files, expandedResult.Leafs - if len(files) > listener.maxMetricsRendered { - listener.accessLogger.Error( - "rendering too many metrics", - zap.Int("limit", listener.maxMetricsRendered), - zap.Int("target", len(files)), - ) - - files = files[:listener.maxMetricsRendered] - leafs = leafs[:listener.maxMetricsRendered] - } - - metricsCount := 0 - for i := range files { - if leafs[i] { - metricsCount++ - } - } - listener.accessLogger.Debug("expandGlobs result", - zap.String("handler", "render"), - zap.String("action", "expandGlobs"), - zap.String("metric", metric.Name), - zap.Int("metrics_count", metricsCount), - zap.Int32("from", fromTime), - zap.Int32("until", untilTime), - ) - - if format == protoV2Format || format == jsonFormat { - res, err := listener.fetchDataPB(metric.Name, files, leafs, fromTime, untilTime) - if err != nil { - atomic.AddUint64(&listener.metrics.RenderErrors, 1) - listener.accessLogger.Error("error while fetching the data", - zap.Error(err), - ) - continue - } - multiv2.Metrics = append(multiv2.Metrics, res.Metrics...) - } else { - res, err := listener.fetchDataPB3(metric.Name, files, leafs, fromTime, untilTime) - if err != nil { - atomic.AddUint64(&listener.metrics.RenderErrors, 1) - listener.accessLogger.Error("error while fetching the data", - zap.Error(err), - ) - continue - } - for i := range res.Metrics { - res.Metrics[i].PathExpression = metric.PathExpression - } - multiv3.Metrics = append(multiv3.Metrics, res.Metrics...) 
- } - } else { - listener.accessLogger.Debug("expand globs returned an error", - zap.Error(err), - ) - continue - } - + for renderResponse := range responseChan { + if format == protoV2Format || format == jsonFormat { + res := renderResponse.proto2() + multiv2.Metrics = append(multiv2.Metrics, res) + } else { + res := renderResponse.proto3() + multiv3.Metrics = append(multiv3.Metrics, res) } } @@ -627,42 +565,19 @@ func (listener *CarbonserverListener) prepareDataProto(ctx context.Context, logg return fetchResponse{b, contentType, metricsFetched, valuesFetched, memoryUsed, metrics}, nil } -func (listener *CarbonserverListener) fetchDataPB3(pathExpression string, files []string, leafs []bool, fromTime, untilTime int32) (*protov3.MultiFetchResponse, error) { - var multi protov3.MultiFetchResponse +func (listener *CarbonserverListener) fetchData(metric, pathExpression string, files []string, leafs []bool, fromTime, untilTime int32) ([]response, error) { + var multi []response var errs []error for i, fileName := range files { if !leafs[i] { - listener.logger.Debug("skipping directory", zap.String("PathExpression", pathExpression)) - // can't fetch a directory - continue - } - response, err := listener.fetchSingleMetricV3(fileName, pathExpression, fromTime, untilTime) - if err == nil { - multi.Metrics = append(multi.Metrics, response) - } else { - errs = append(errs, err) - } - } - if len(errs) != 0 { - listener.logger.Warn("errors occur while fetching data", - zap.Any("errors", errs), - ) - } - return &multi, nil -} - -func (listener *CarbonserverListener) fetchDataPB(metric string, files []string, leafs []bool, fromTime, untilTime int32) (*protov2.MultiFetchResponse, error) { - var multi protov2.MultiFetchResponse - var errs []error - for i, fileMetric := range files { - if !leafs[i] { - listener.logger.Debug("skipping directory", zap.String("metricName", metric), zap.String("fileMetric", fileMetric)) + listener.logger.Debug("skipping directory", zap.String("PathExpression", pathExpression), + zap.String("metricName", metric), zap.String("fileName", fileName)) // can't fetch a directory continue } - response, err := listener.fetchSingleMetricV2(fileMetric, fromTime, untilTime) + response, err := listener.fetchSingleMetric(fileName, pathExpression, fromTime, untilTime) if err == nil { - multi.Metrics = append(multi.Metrics, response) + multi = append(multi, response) } else { errs = append(errs, err) } @@ -672,7 +587,7 @@ func (listener *CarbonserverListener) fetchDataPB(metric string, files []string, zap.Any("errors", errs), ) } - return &multi, nil + return multi, nil } // Render implements Render rpc of CarbonV2 gRPC service @@ -733,7 +648,7 @@ func (listener *CarbonserverListener) Render(req *protov2.MultiFetchRequest, str fromCache := false // TODO: should chan buffer size be configurable? - responseChan := make(chan *protov2.FetchResponse, 1000) + responseChan := make(chan response, 1000) metricNames := getUniqueMetricNames(targets) // TODO: pipeline? 
@@ -743,8 +658,8 @@ func (listener *CarbonserverListener) Render(req *protov2.MultiFetchRequest, str return status.New(codes.InvalidArgument, err.Error()).Err() } } - - go listener.prepareDataProtoStream(ctx, targets, expandedGlobs, responseChan) + metricGlobMap := getMetricGlobMapFromExpandedGlobs(expandedGlobs) + go listener.prepareDataStream(ctx, format, targets, metricGlobMap, responseChan) metricsFetched := 0 valuesFetched := 0 @@ -752,7 +667,7 @@ func (listener *CarbonserverListener) Render(req *protov2.MultiFetchRequest, str metricAccessBatchSize := 100 metricAccessBatch := make([]string, 0, metricAccessBatchSize) for renderResponse := range responseChan { - err := stream.Send(renderResponse) + err := stream.Send(renderResponse.proto2()) if err != nil { atomic.AddUint64(&listener.metrics.RenderErrors, 1) accessLogger.Error("fetch failed", From 504d7ad85ca3389a36219213f3379ca0bb869313 Mon Sep 17 00:00:00 2001 From: Emad Mohamadi Date: Fri, 15 Jul 2022 13:51:22 +0200 Subject: [PATCH 08/11] Fix deepsource failures --- carbonserver/carbonserver.go | 2 +- carbonserver/carbonserver_test.go | 6 +++--- carbonserver/render.go | 5 +---- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/carbonserver/carbonserver.go b/carbonserver/carbonserver.go index 53edc7a09..4b3c046c4 100644 --- a/carbonserver/carbonserver.go +++ b/carbonserver/carbonserver.go @@ -1997,7 +1997,7 @@ func (listener *CarbonserverListener) ListenGRPC(listen string) error { var opts []grpc.ServerOption - grpcServer := grpc.NewServer(opts...) + grpcServer := grpc.NewServer(opts...) //skipcq: GO-S0902 grpcv2.RegisterCarbonV2Server(grpcServer, listener) go grpcServer.Serve(listener.grpcListener) return nil diff --git a/carbonserver/carbonserver_test.go b/carbonserver/carbonserver_test.go index b7e4a650e..d29303c3f 100644 --- a/carbonserver/carbonserver_test.go +++ b/carbonserver/carbonserver_test.go @@ -171,7 +171,7 @@ func generalFetchSingleMetricRemove(testData *FetchTest) { os.Remove(filepath.Join(testData.path, testData.name+".wsp")) } -func generalFetchSingleMetricHelper(testData *FetchTest, cache *cache.Cache, carbonserver *CarbonserverListener) (*pb.FetchResponse, error) { +func generalFetchSingleMetricHelper(testData *FetchTest, carbonserver *CarbonserverListener) (*pb.FetchResponse, error) { data, err := carbonserver.fetchSingleMetric(testData.name, "", int32(testData.from), int32(testData.until)) if err != nil { return nil, err @@ -185,7 +185,7 @@ func testFetchSingleMetricHelper(testData *FetchTest, cache *cache.Cache, carbon return nil, err } defer generalFetchSingleMetricRemove(testData) - data, err := generalFetchSingleMetricHelper(testData, cache, carbonserver) + data, err := generalFetchSingleMetricHelper(testData, carbonserver) return data, err } @@ -517,7 +517,7 @@ func benchmarkFetchSingleMetricCommon(b *testing.B, test *FetchTest) { b.ResetTimer() for runs := 0; runs < b.N; runs++ { - data, err := generalFetchSingleMetricHelper(test, cache, carbonserver) + data, err := generalFetchSingleMetricHelper(test, carbonserver) if err != nil { b.Errorf("Unexpected error: %v", err) return diff --git a/carbonserver/render.go b/carbonserver/render.go index 6d0fd1537..5fa5d7505 100644 --- a/carbonserver/render.go +++ b/carbonserver/render.go @@ -525,10 +525,7 @@ func (listener *CarbonserverListener) prepareDataProto(ctx context.Context, logg var response []map[string]interface{} for _, metric := range multiv3.GetMetrics() { - - var m map[string]interface{} //nolint:gosimple - - m = make(map[string]interface{}) + 
m := make(map[string]interface{}) m["start"] = metric.StartTime m["step"] = metric.StepTime m["end"] = metric.StopTime From f48de7b0ee901a098222dc2c8d817efd85d3b81a Mon Sep 17 00:00:00 2001 From: Emad Mohamadi Date: Fri, 15 Jul 2022 15:33:17 +0200 Subject: [PATCH 09/11] Handle context done in render for prepare data --- carbonserver/render.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/carbonserver/render.go b/carbonserver/render.go index 5fa5d7505..5aeef61d2 100644 --- a/carbonserver/render.go +++ b/carbonserver/render.go @@ -480,6 +480,20 @@ func (listener *CarbonserverListener) prepareDataProto(ctx context.Context, logg } } + select { + case <-ctx.Done(): + switch ctx.Err() { + case context.DeadlineExceeded: + listener.prometheus.timeoutRequest() + case context.Canceled: + listener.prometheus.cancelledRequest() + } + + err := fmt.Errorf("time out while preparing data proto") + return fetchResponse{nil, contentType, 0, 0, 0, nil}, err + default: + } + if format == protoV2Format || format == jsonFormat { if len(multiv2.Metrics) == 0 && format == protoV2Format { return fetchResponse{nil, contentType, 0, 0, 0, nil}, err @@ -685,6 +699,19 @@ func (listener *CarbonserverListener) Render(req *protov2.MultiFetchRequest, str } } + select { + case <-ctx.Done(): + switch ctx.Err() { + case context.DeadlineExceeded: + listener.prometheus.timeoutRequest() + case context.Canceled: + listener.prometheus.cancelledRequest() + } + + return status.New(codes.DeadlineExceeded, "time out while preparing data proto").Err() + default: + } + if metricsFetched == 0 && !listener.emptyResultOk { atomic.AddUint64(&listener.metrics.RenderErrors, 1) accessLogger.Error("fetch failed", From f7e0949fe509bf1685dbaa403065a8112d840ab7 Mon Sep 17 00:00:00 2001 From: Emad Mohamadi Date: Mon, 18 Jul 2022 11:52:29 +0200 Subject: [PATCH 10/11] Update gRPC config comments and docs --- README.md | 6 ++++++ go-carbon.conf.example | 1 + 2 files changed, 7 insertions(+) diff --git a/README.md b/README.md index e58fd5a11..6a8800d36 100644 --- a/README.md +++ b/README.md @@ -528,6 +528,12 @@ stats-percentiles = [99, 98, 95, 75, 50] # path = "/metrics/list_query/" # max-inflight-requests = 3 +# carbonserver.grpc is the configuration for listening for grpc clients. +# Note: currently, only CarbonV2 Render rpc is implemented. +# [carbonserver.grpc] +# enabled = true +# listen = ":7004" + [dump] # Enable dump/restore function on USR2 signal enabled = false diff --git a/go-carbon.conf.example b/go-carbon.conf.example index eec80ceed..76899421a 100644 --- a/go-carbon.conf.example +++ b/go-carbon.conf.example @@ -427,6 +427,7 @@ stats-percentiles = [99, 98, 95, 75, 50] # max-inflight-requests = 3 # carbonserver.grpc is the configuration for listening for grpc clients. +# Note: currently, only CarbonV2 Render rpc is implemented. 
# [carbonserver.grpc] # enabled = true # listen = ":7004" From 3f539954dbd5d4d147fb9d1d967be34fc1b41e04 Mon Sep 17 00:00:00 2001 From: Emad Mohamadi Date: Mon, 18 Jul 2022 15:30:50 +0200 Subject: [PATCH 11/11] Catchup with go-graphite/protocol master with grpc --- go.mod | 2 +- go.sum | 4 ++-- .../protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto | 2 ++ vendor/modules.txt | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 61723609b..1dcf07771 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/dgryski/httputil v0.0.0-20160116060654-189c2918cd08 github.com/go-graphite/carbonzipper v0.0.0-20180329125635-fedce067a794 github.com/go-graphite/go-whisper v0.0.0-20220706140940-0fdbe10fe673 - github.com/go-graphite/protocol v1.0.1-0.20220713140022-0180df7ab791 + github.com/go-graphite/protocol v1.0.1-0.20220718132526-4b842ba389ee github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect github.com/google/go-cmp v0.5.8 diff --git a/go.sum b/go.sum index 92823f393..b97493aa0 100644 --- a/go.sum +++ b/go.sum @@ -88,8 +88,8 @@ github.com/go-graphite/carbonzipper v0.0.0-20180329125635-fedce067a794 h1:9N+1I8 github.com/go-graphite/carbonzipper v0.0.0-20180329125635-fedce067a794/go.mod h1:sfZ+AkP8/bBcWSGVqhseV6t6e/+C0i/PkTpA32W5rXs= github.com/go-graphite/go-whisper v0.0.0-20220706140940-0fdbe10fe673 h1:KXBNR5tGEMaq2ZOtUNgRdPEJc5LfNFUQj8LGsaRyeXA= github.com/go-graphite/go-whisper v0.0.0-20220706140940-0fdbe10fe673/go.mod h1:1edRhfqJoiHSjN72nYptHK0YR4yYt8aPC4NRHWhT0XE= -github.com/go-graphite/protocol v1.0.1-0.20220713140022-0180df7ab791 h1:W2oVuFO138fYSMWIzDlHlL/4y6OSWFEPAbi55WQkOFY= -github.com/go-graphite/protocol v1.0.1-0.20220713140022-0180df7ab791/go.mod h1:puWkW2DFZi46CaLY1rxYhkDiBDFlpaTDaMjcE/Cd9gw= +github.com/go-graphite/protocol v1.0.1-0.20220718132526-4b842ba389ee h1:Sb63UbLUOXtKFIfNfZIkWrs73vWD0ZKzrBdTjvlVOBQ= +github.com/go-graphite/protocol v1.0.1-0.20220718132526-4b842ba389ee/go.mod h1:puWkW2DFZi46CaLY1rxYhkDiBDFlpaTDaMjcE/Cd9gw= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= diff --git a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto index 4b2522954..c729a86b1 100644 --- a/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto +++ b/vendor/github.com/go-graphite/protocol/carbonapi_v2_grpc/carbonapi_v2_grpc.proto @@ -7,6 +7,8 @@ import "google/protobuf/empty.proto"; import "carbonapi_v2_pb/carbonapi_v2_pb.proto"; service CarbonV2 { + // Render rpc fetches the metrics requested from the startTime until stopTime or metric end time, + // and sends each metric in a FetchResponse through the stream. 
rpc Render(carbonapi_v2_pb.MultiFetchRequest) returns (stream carbonapi_v2_pb.FetchResponse) {} rpc Find(carbonapi_v2_pb.GlobRequest) returns (carbonapi_v2_pb.GlobResponse) {} rpc List(google.protobuf.Empty) returns (carbonapi_v2_pb.ListMetricsResponse) {} diff --git a/vendor/modules.txt b/vendor/modules.txt index 9d6a3e8ec..cf1d9340d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -59,7 +59,7 @@ github.com/go-graphite/carbonzipper/zipper/httpHeaders # github.com/go-graphite/go-whisper v0.0.0-20220706140940-0fdbe10fe673 ## explicit; go 1.18 github.com/go-graphite/go-whisper -# github.com/go-graphite/protocol v1.0.1-0.20220713140022-0180df7ab791 +# github.com/go-graphite/protocol v1.0.1-0.20220718132526-4b842ba389ee ## explicit; go 1.16 github.com/go-graphite/protocol/carbonapi_v2_grpc github.com/go-graphite/protocol/carbonapi_v2_pb
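
For reference, below is a minimal client-side sketch of the CarbonV2 Render stream that these patches expose; it is not part of the patch series. The service name, the Render rpc, and the ":7004" listen address come from carbonapi_v2_grpc.proto and go-carbon.conf.example above, and the import aliases mirror the ones used in carbonserver. The exact FetchRequest/FetchResponse field names (Name, StartTime, StopTime, StepTime, Values) are assumptions based on carbonapi_v2_pb and should be checked against the generated code.

// render_client_example.go -- illustrative sketch only, not part of the patches above.
package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"time"

	grpcv2 "github.com/go-graphite/protocol/carbonapi_v2_grpc"
	protov2 "github.com/go-graphite/protocol/carbonapi_v2_pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// ":7004" matches the commented-out listen address in go-carbon.conf.example.
	conn, err := grpc.Dial("127.0.0.1:7004", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := grpcv2.NewCarbonV2Client(conn)

	now := int32(time.Now().Unix())
	// Field names below are assumed from carbonapi_v2_pb; verify against the generated code.
	req := &protov2.MultiFetchRequest{
		Metrics: []*protov2.FetchRequest{
			{Name: "carbon.agents.*.metricsReceived", StartTime: now - 3600, StopTime: now},
		},
	}

	// Render streams one FetchResponse per resolved metric until the server closes the stream.
	stream, err := client.Render(context.Background(), req)
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: %d points, step %d\n", resp.Name, len(resp.Values), resp.StepTime)
	}
}

Each metric arrives as its own FetchResponse because, per the render.go changes above, the server converts every internal response with proto2() and calls stream.Send per metric instead of buffering a single MultiFetchResponse.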