From a611151de07e39686aefaeaacb8df6f7f1eb5d14 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 8 Jan 2024 22:34:35 +0000
Subject: [PATCH] Bump github.com/spiffe/spire-api-sdk from 1.8.5 to 1.8.7

Bumps [github.com/spiffe/spire-api-sdk](https://github.com/spiffe/spire-api-sdk) from 1.8.5 to 1.8.7.
- [Commits](https://github.com/spiffe/spire-api-sdk/compare/v1.8.5...v1.8.7)

---
updated-dependencies:
- dependency-name: github.com/spiffe/spire-api-sdk
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
---
 go.mod | 80 +-
 go.sum | 179 +-
 .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 10 +
 .../sdk/azcore/internal/shared/constants.go | 4 +-
 .../sdk/azcore/internal/shared/shared.go | 62 +-
 .../aws/aws-sdk-go-v2/aws/config.go | 12 +
 .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +-
 .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 14 +
 .../aws/aws-sdk-go-v2/config/config.go | 8 +-
 .../aws/aws-sdk-go-v2/config/env_config.go | 58 +
 .../config/go_module_metadata.go | 2 +-
 .../aws/aws-sdk-go-v2/config/load_options.go | 46 +
 .../aws/aws-sdk-go-v2/config/provider.go | 34 +
 .../aws/aws-sdk-go-v2/config/resolve.go | 27 +
 .../aws/aws-sdk-go-v2/config/shared_config.go | 94 +-
 .../aws-sdk-go-v2/credentials/CHANGELOG.md | 12 +
 .../credentials/go_module_metadata.go | 2 +-
 .../feature/ec2/imds/CHANGELOG.md | 4 +
 .../feature/ec2/imds/go_module_metadata.go | 2 +-
 .../internal/configsources/CHANGELOG.md | 4 +
 .../configsources/go_module_metadata.go | 2 +-
 .../endpoints/awsrulesfn/partitions.go | 40 +-
 .../internal/endpoints/v2/CHANGELOG.md | 4 +
 .../endpoints/v2/go_module_metadata.go | 2 +-
 .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 4 +
 .../internal/ini/go_module_metadata.go | 2 +-
 .../aws/aws-sdk-go-v2/internal/ini/value.go | 13 +-
 .../internal/accept-encoding/CHANGELOG.md | 4 +
 .../accept-encoding/go_module_metadata.go | 2 +-
 .../internal/presigned-url/CHANGELOG.md | 4 +
 .../presigned-url/go_module_metadata.go | 2 +-
 .../aws-sdk-go-v2/service/kms/CHANGELOG.md | 16 +
 .../aws-sdk-go-v2/service/kms/api_client.go | 18 +-
 .../service/kms/api_op_CancelKeyDeletion.go | 5 +-
 .../kms/api_op_ConnectCustomKeyStore.go | 4 +
 .../service/kms/api_op_CreateAlias.go | 4 +
 .../kms/api_op_CreateCustomKeyStore.go | 4 +
 .../service/kms/api_op_CreateGrant.go | 4 +
 .../service/kms/api_op_CreateKey.go | 7 +-
 .../service/kms/api_op_Decrypt.go | 20 +-
 .../service/kms/api_op_DeleteAlias.go | 4 +
 .../kms/api_op_DeleteCustomKeyStore.go | 4 +
 .../kms/api_op_DeleteImportedKeyMaterial.go | 4 +
 .../kms/api_op_DescribeCustomKeyStores.go | 4 +
 .../service/kms/api_op_DescribeKey.go | 17 +-
 .../service/kms/api_op_DisableKey.go | 5 +-
 .../service/kms/api_op_DisableKeyRotation.go | 4 +
 .../kms/api_op_DisconnectCustomKeyStore.go | 4 +
 .../service/kms/api_op_EnableKey.go | 5 +-
 .../service/kms/api_op_EnableKeyRotation.go | 4 +
 .../service/kms/api_op_Encrypt.go | 4 +
 .../service/kms/api_op_GenerateDataKey.go | 4 +
 .../service/kms/api_op_GenerateDataKeyPair.go | 6 +-
 ..._op_GenerateDataKeyPairWithoutPlaintext.go | 4 +
 .../api_op_GenerateDataKeyWithoutPlaintext.go | 4 +
 .../service/kms/api_op_GenerateMac.go | 5 +-
 .../service/kms/api_op_GenerateRandom.go | 4 +-
 .../service/kms/api_op_GetKeyPolicy.go | 5 +-
 .../kms/api_op_GetKeyRotationStatus.go | 4 +
 .../kms/api_op_GetParametersForImport.go | 18 +-
 .../service/kms/api_op_GetPublicKey.go | 5 +-
 .../service/kms/api_op_ImportKeyMaterial.go | 4 +
 .../service/kms/api_op_ListAliases.go | 4 +
.../service/kms/api_op_ListGrants.go | 4 + .../service/kms/api_op_ListKeyPolicies.go | 6 +- .../service/kms/api_op_ListKeys.go | 4 + .../service/kms/api_op_ListResourceTags.go | 4 + .../service/kms/api_op_ListRetirableGrants.go | 22 +- .../service/kms/api_op_PutKeyPolicy.go | 8 +- .../service/kms/api_op_ReEncrypt.go | 4 + .../service/kms/api_op_ReplicateKey.go | 15 +- .../service/kms/api_op_RetireGrant.go | 6 +- .../service/kms/api_op_RevokeGrant.go | 4 + .../service/kms/api_op_ScheduleKeyDeletion.go | 26 +- .../aws-sdk-go-v2/service/kms/api_op_Sign.go | 5 +- .../service/kms/api_op_TagResource.go | 4 + .../service/kms/api_op_UntagResource.go | 4 + .../service/kms/api_op_UpdateAlias.go | 4 + .../kms/api_op_UpdateCustomKeyStore.go | 4 + .../kms/api_op_UpdateKeyDescription.go | 4 + .../service/kms/api_op_UpdatePrimaryRegion.go | 4 + .../service/kms/api_op_Verify.go | 5 +- .../service/kms/api_op_VerifyMac.go | 5 +- .../aws/aws-sdk-go-v2/service/kms/auth.go | 28 + .../aws-sdk-go-v2/service/kms/endpoints.go | 2 +- .../service/kms/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/kms/options.go | 8 +- .../aws-sdk-go-v2/service/kms/types/errors.go | 4 +- .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 12 + .../aws-sdk-go-v2/service/sso/api_client.go | 18 +- .../aws/aws-sdk-go-v2/service/sso/auth.go | 28 + .../service/sso/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/sso/options.go | 8 +- .../service/ssooidc/CHANGELOG.md | 12 + .../service/ssooidc/api_client.go | 18 +- .../aws/aws-sdk-go-v2/service/ssooidc/auth.go | 28 + .../service/ssooidc/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/ssooidc/options.go | 8 +- .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 13 + .../aws-sdk-go-v2/service/sts/api_client.go | 18 +- .../aws/aws-sdk-go-v2/service/sts/auth.go | 50 +- .../service/sts/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/sts/options.go | 8 +- vendor/github.com/aws/smithy-go/CHANGELOG.md | 6 + .../aws/smithy-go/go_module_metadata.go | 2 +- .../private/requestcompression/gzip.go | 30 + .../middleware_capture_request_compression.go | 52 + .../requestcompression/request_compression.go | 103 + .../github.com/felixge/httpsnoop/.gitignore | 0 .../github.com/felixge/httpsnoop/LICENSE.txt | 19 + vendor/github.com/felixge/httpsnoop/Makefile | 10 + vendor/github.com/felixge/httpsnoop/README.md | 95 + .../felixge/httpsnoop/capture_metrics.go | 86 + vendor/github.com/felixge/httpsnoop/docs.go | 10 + .../httpsnoop/wrap_generated_gteq_1.8.go | 436 ++ .../httpsnoop/wrap_generated_lt_1.8.go | 278 ++ vendor/github.com/go-kit/log/README.md | 5 + vendor/github.com/go-kit/log/json_logger.go | 4 +- vendor/github.com/go-kit/log/level/doc.go | 11 + vendor/github.com/go-kit/log/level/level.go | 53 +- vendor/github.com/go-kit/log/staticcheck.conf | 1 + vendor/github.com/go-logr/logr/README.md | 113 +- vendor/github.com/go-logr/logr/SECURITY.md | 18 + vendor/github.com/go-logr/logr/funcr/funcr.go | 48 +- vendor/github.com/go-logr/logr/logr.go | 35 +- .../jellydator/ttlcache/v3/cache.go | 7 + .../letsencrypt/boulder/core/challenges.go | 18 + .../letsencrypt/boulder/core/interfaces.go | 7 +- .../letsencrypt/boulder/core/objects.go | 139 +- .../letsencrypt/boulder/core/proto/core.pb.go | 1182 ----- .../letsencrypt/boulder/core/proto/core.proto | 101 - .../letsencrypt/boulder/core/util.go | 47 +- .../letsencrypt/boulder/errors/errors.go | 194 - .../boulder/features/featureflag_string.go | 56 - .../letsencrypt/boulder/features/features.go | 203 - .../letsencrypt/boulder/goodkey/blocked.go 
| 5 +- .../letsencrypt/boulder/goodkey/good_key.go | 53 +- .../letsencrypt/boulder/probs/probs.go | 328 +- .../letsencrypt/boulder/sa/proto/sa.pb.go | 4261 ----------------- .../letsencrypt/boulder/sa/proto/sa.proto | 353 -- .../boulder/sa/proto/sa_grpc.pb.go | 2937 ------------ .../letsencrypt/boulder/sa/proto/subsets.go | 47 - .../letsencrypt/boulder/strictyaml/yaml.go | 46 + .../client_golang/prometheus/counter.go | 26 +- .../client_golang/prometheus/desc.go | 46 +- .../client_golang/prometheus/doc.go | 44 +- .../client_golang/prometheus/gauge.go | 26 +- .../prometheus/go_collector_latest.go | 7 +- .../client_golang/prometheus/histogram.go | 51 +- .../client_golang/prometheus/labels.go | 72 + .../client_golang/prometheus/metric.go | 6 +- .../prometheus/promhttp/instrument_client.go | 26 +- .../prometheus/promhttp/instrument_server.go | 101 +- .../prometheus/promhttp/option.go | 38 +- .../client_golang/prometheus/registry.go | 17 +- .../client_golang/prometheus/summary.go | 39 +- .../client_golang/prometheus/timer.go | 28 +- .../client_golang/prometheus/value.go | 10 +- .../client_golang/prometheus/vec.go | 54 +- .../client_golang/prometheus/vnext.go | 23 + .../client_golang/prometheus/wrap.go | 8 +- .../prometheus/client_model/go/metrics.pb.go | 1530 +++--- .../prometheus/common/expfmt/decode.go | 36 +- .../prometheus/common/expfmt/fuzz.go | 4 +- .../common/expfmt/openmetrics_create.go | 22 +- .../prometheus/common/expfmt/text_create.go | 3 +- .../prometheus/common/expfmt/text_parse.go | 10 +- .../bitbucket.org/ww/goautoneg/autoneg.go | 22 +- .../prometheus/common/model/time.go | 89 +- .../prometheus/common/model/value.go | 246 +- .../prometheus/common/model/value_float.go | 100 + .../common/model/value_histogram.go | 178 + .../prometheus/common/model/value_type.go | 83 + .../prometheus/procfs/Makefile.common | 9 +- .../github.com/prometheus/procfs/cpuinfo.go | 36 + .../prometheus/procfs/cpuinfo_loong64.go | 19 + .../prometheus/procfs/cpuinfo_others.go | 4 +- vendor/github.com/prometheus/procfs/doc.go | 51 +- .../prometheus/procfs/mountstats.go | 3 +- .../prometheus/procfs/net_softnet.go | 70 +- .../github.com/prometheus/procfs/netstat.go | 53 +- .../prometheus/procfs/proc_cgroup.go | 2 +- .../prometheus/procfs/proc_interrupts.go | 98 + .../prometheus/procfs/proc_netstat.go | 491 +- .../github.com/prometheus/procfs/proc_snmp.go | 318 +- .../prometheus/procfs/proc_snmp6.go | 364 +- .../github.com/prometheus/procfs/proc_stat.go | 4 +- .../prometheus/procfs/proc_status.go | 6 +- vendor/github.com/prometheus/procfs/stat.go | 22 +- vendor/github.com/prometheus/procfs/thread.go | 79 + vendor/github.com/prometheus/procfs/vm.go | 4 +- .../spire/api/server/entry/v1/entry.pb.go | 504 +- .../spire/api/server/entry/v1/entry.proto | 60 +- .../api/server/entry/v1/entry_grpc.pb.go | 105 +- .../google.golang.org/grpc/otelgrpc/LICENSE | 201 + .../google.golang.org/grpc/otelgrpc/config.go | 229 + .../google.golang.org/grpc/otelgrpc/doc.go | 22 + .../grpc/otelgrpc/interceptor.go | 540 +++ .../grpc/otelgrpc/interceptorinfo.go | 50 + .../grpc/otelgrpc/internal/parse.go | 51 + .../grpc/otelgrpc/metadata_supplier.go | 98 + .../grpc/otelgrpc/semconv.go | 52 + .../grpc/otelgrpc/stats_handler.go | 238 + .../grpc/otelgrpc/version.go | 28 + .../instrumentation/net/http/otelhttp/LICENSE | 201 + .../net/http/otelhttp/client.go | 61 + .../net/http/otelhttp/common.go | 46 + .../net/http/otelhttp/config.go | 207 + .../instrumentation/net/http/otelhttp/doc.go | 18 + .../net/http/otelhttp/handler.go | 287 ++ 
.../http/otelhttp/internal/semconvutil/gen.go | 21 + .../otelhttp/internal/semconvutil/httpconv.go | 552 +++ .../otelhttp/internal/semconvutil/netconv.go | 368 ++ .../net/http/otelhttp/labeler.go | 65 + .../net/http/otelhttp/transport.go | 193 + .../net/http/otelhttp/version.go | 28 + .../instrumentation/net/http/otelhttp/wrap.go | 99 + vendor/go.opentelemetry.io/otel/.gitignore | 5 +- vendor/go.opentelemetry.io/otel/.golangci.yml | 17 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 85 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 4 + vendor/go.opentelemetry.io/otel/Makefile | 29 +- vendor/go.opentelemetry.io/otel/README.md | 15 +- .../otel/baggage/baggage.go | 4 +- .../otel/internal/global/instruments.go | 60 +- .../otel/internal/global/trace.go | 7 + vendor/go.opentelemetry.io/otel/metric/doc.go | 2 +- .../otel/metric/instrument.go | 23 + .../otel/metric/syncfloat64.go | 10 +- .../otel/metric/syncint64.go | 10 +- .../otel/propagation/trace_context.go | 6 +- .../go.opentelemetry.io/otel/requirements.txt | 2 +- .../otel/semconv/v1.17.0/doc.go | 20 + .../otel/semconv/v1.17.0/event.go | 199 + .../otel/semconv/v1.17.0/exception.go | 20 + .../otel/semconv/v1.17.0/http.go | 21 + .../otel/semconv/v1.17.0/resource.go | 2010 ++++++++ .../otel/semconv/v1.17.0/schema.go | 20 + .../otel/semconv/v1.17.0/trace.go | 3375 +++++++++++++ .../go.opentelemetry.io/otel/trace/config.go | 1 + vendor/go.opentelemetry.io/otel/trace/doc.go | 64 + .../otel/trace/embedded/embedded.go | 56 + vendor/go.opentelemetry.io/otel/trace/noop.go | 10 +- .../go.opentelemetry.io/otel/trace/trace.go | 40 +- .../otel/trace/tracestate.go | 38 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 7 +- .../api/internal/settings.go | 2 + .../google.golang.org/api/internal/version.go | 2 +- .../option/internaloption/internaloption.go | 16 + vendor/google.golang.org/api/option/option.go | 13 + .../api/transport/grpc/dial.go | 14 +- .../api/transport/http/dial.go | 11 + .../go-jose/go-jose.v2/.gitcookies.sh.enc | 1 + vendor/gopkg.in/go-jose/go-jose.v2/.gitignore | 8 + .../gopkg.in/go-jose/go-jose.v2/.travis.yml | 45 + .../go-jose/go-jose.v2/CONTRIBUTING.md | 14 + vendor/gopkg.in/go-jose/go-jose.v2/LICENSE | 202 + vendor/gopkg.in/go-jose/go-jose.v2/README.md | 118 + .../gopkg.in/go-jose/go-jose.v2/asymmetric.go | 592 +++ .../go-jose/go-jose.v2/cipher/cbc_hmac.go | 196 + .../go-jose/go-jose.v2/cipher/concat_kdf.go | 75 + .../go-jose/go-jose.v2/cipher/ecdh_es.go | 86 + .../go-jose/go-jose.v2/cipher/key_wrap.go | 109 + vendor/gopkg.in/go-jose/go-jose.v2/crypter.go | 542 +++ vendor/gopkg.in/go-jose/go-jose.v2/doc.go | 27 + .../gopkg.in/go-jose/go-jose.v2/encoding.go | 185 + .../gopkg.in/go-jose/go-jose.v2/json/LICENSE | 27 + .../go-jose/go-jose.v2/json/README.md | 13 + .../go-jose/go-jose.v2/json/decode.go | 1217 +++++ .../go-jose/go-jose.v2/json/encode.go | 1197 +++++ .../go-jose/go-jose.v2/json/indent.go | 141 + .../go-jose/go-jose.v2/json/scanner.go | 623 +++ .../go-jose/go-jose.v2/json/stream.go | 485 ++ .../gopkg.in/go-jose/go-jose.v2/json/tags.go | 44 + vendor/gopkg.in/go-jose/go-jose.v2/jwe.go | 294 ++ vendor/gopkg.in/go-jose/go-jose.v2/jwk.go | 760 +++ vendor/gopkg.in/go-jose/go-jose.v2/jws.go | 366 ++ vendor/gopkg.in/go-jose/go-jose.v2/opaque.go | 144 + vendor/gopkg.in/go-jose/go-jose.v2/shared.go | 520 ++ vendor/gopkg.in/go-jose/go-jose.v2/signing.go | 441 ++ .../gopkg.in/go-jose/go-jose.v2/symmetric.go | 482 ++ vendor/modules.txt | 110 +- 283 files changed, 25086 insertions(+), 
11855 deletions(-) create mode 100644 vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go create mode 100644 vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go create mode 100644 vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go create mode 100644 vendor/github.com/felixge/httpsnoop/.gitignore create mode 100644 vendor/github.com/felixge/httpsnoop/LICENSE.txt create mode 100644 vendor/github.com/felixge/httpsnoop/Makefile create mode 100644 vendor/github.com/felixge/httpsnoop/README.md create mode 100644 vendor/github.com/felixge/httpsnoop/capture_metrics.go create mode 100644 vendor/github.com/felixge/httpsnoop/docs.go create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go create mode 100644 vendor/github.com/go-kit/log/staticcheck.conf create mode 100644 vendor/github.com/go-logr/logr/SECURITY.md delete mode 100644 vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go delete mode 100644 vendor/github.com/letsencrypt/boulder/core/proto/core.proto delete mode 100644 vendor/github.com/letsencrypt/boulder/errors/errors.go delete mode 100644 vendor/github.com/letsencrypt/boulder/features/featureflag_string.go delete mode 100644 vendor/github.com/letsencrypt/boulder/features/features.go delete mode 100644 vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go delete mode 100644 vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto delete mode 100644 vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go delete mode 100644 vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go create mode 100644 vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vnext.go create mode 100644 vendor/github.com/prometheus/common/model/value_float.go create mode 100644 vendor/github.com/prometheus/common/model/value_histogram.go create mode 100644 vendor/github.com/prometheus/common/model/value_type.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_loong64.go create mode 100644 vendor/github.com/prometheus/procfs/proc_interrupts.go create mode 100644 vendor/github.com/prometheus/procfs/thread.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go create mode 100644 
vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/.gitignore create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/LICENSE create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/README.md create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/crypter.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/doc.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/encoding.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/README.md create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/indent.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/scanner.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/stream.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/tags.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/jwe.go create mode 100644 
vendor/gopkg.in/go-jose/go-jose.v2/jwk.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/jws.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/opaque.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/shared.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/signing.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/symmetric.go diff --git a/go.mod b/go.mod index e0bd67b1b7a..e75e93f74ab 100644 --- a/go.mod +++ b/go.mod @@ -20,11 +20,11 @@ require ( github.com/pkg/errors v0.9.1 github.com/sigstore/sigstore v1.7.5 github.com/spiffe/go-spiffe/v2 v2.1.5 - github.com/spiffe/spire-api-sdk v1.8.5 + github.com/spiffe/spire-api-sdk v1.8.7 github.com/tektoncd/plumbing v0.0.0-20220817140952-3da8ce01aeeb go.opencensus.io v0.24.0 go.uber.org/zap v1.26.0 - golang.org/x/exp v0.0.0-20230307190834-24139beb5833 + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 golang.org/x/oauth2 v0.15.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 gopkg.in/square/go-jose.v2 v2.6.0 @@ -52,14 +52,14 @@ require ( github.com/google/cel-go v0.18.1 github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230625233257-b8504803389b github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa - github.com/sigstore/sigstore/pkg/signature/kms/aws v1.7.6 - github.com/sigstore/sigstore/pkg/signature/kms/azure v1.7.6 - github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.6 - github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.6 - go.opentelemetry.io/otel v1.19.0 + github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.0 + github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.0 + github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.0 + github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.0 + go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/sdk v1.19.0 - go.opentelemetry.io/otel/trace v1.19.0 + go.opentelemetry.io/otel/trace v1.21.0 k8s.io/utils v0.0.0-20230505201702-9f6742963106 ) @@ -81,17 +81,17 @@ require ( cloud.google.com/go/iam v1.1.5 // indirect cloud.google.com/go/kms v1.15.5 // indirect dario.cat/mergo v1.0.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect github.com/Microsoft/hcsshim v0.11.4 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 // indirect - github.com/aws/aws-sdk-go-v2/service/kms v1.27.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.27.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 // indirect github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cloudflare/circl v1.3.3 // indirect github.com/containerd/log v0.1.0 // indirect @@ -99,6 +99,7 @@ require ( github.com/davidmz/go-pageant v1.0.2 // 
indirect github.com/emicklei/go-restful/v3 v3.10.2 // indirect github.com/fatih/color v1.13.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-fed/httpsig v1.1.0 // indirect github.com/go-jose/go-jose/v3 v3.0.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -116,9 +117,9 @@ require ( github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/vault/api v1.10.0 // indirect - github.com/jellydator/ttlcache/v3 v3.1.0 // indirect + github.com/jellydator/ttlcache/v3 v3.1.1 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect + github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect github.com/mattn/go-colorable v0.1.9 // indirect github.com/mattn/go-isatty v0.0.14 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -126,14 +127,17 @@ require ( github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect - github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect github.com/skeema/knownhosts v1.2.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/zeebo/errs v1.3.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect + gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect ) require ( @@ -149,19 +153,19 @@ require ( github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/aws/aws-sdk-go-v2 v1.23.5 // indirect - github.com/aws/aws-sdk-go-v2/config v1.25.11 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.16.9 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2 v1.24.0 // indirect + github.com/aws/aws-sdk-go-v2/config v1.26.1 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.16.12 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect github.com/aws/aws-sdk-go-v2/service/ecr v1.18.11 // indirect github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.16.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8 // indirect - 
github.com/aws/aws-sdk-go-v2/service/sso v1.18.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.26.2 // indirect - github.com/aws/smithy-go v1.18.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 // indirect + github.com/aws/smithy-go v1.19.0 // indirect github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect @@ -179,9 +183,9 @@ require ( github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/go-kit/log v0.2.0 // indirect + github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect @@ -208,10 +212,10 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/openzipkin/zipkin-go v0.3.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/client_golang v1.15.1 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect github.com/prometheus/statsd_exporter v0.21.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shurcooL/githubv4 v0.0.0-20190718010115-4ba037080260 // indirect @@ -232,9 +236,9 @@ require ( golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.13.0 // indirect - google.golang.org/api v0.152.0 // indirect + google.golang.org/api v0.154.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f // indirect google.golang.org/grpc v1.60.1 google.golang.org/protobuf v1.31.0 gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 368a22b0a20..4b222594125 100644 --- a/go.sum +++ b/go.sum @@ -59,12 +59,12 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 
h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80= @@ -153,51 +153,51 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.48.11 h1:9YbiSbaF/jWi+qLRl+J5dEhr2mcbDYHmKg2V7RBcD5M= +github.com/aws/aws-sdk-go v1.49.4 h1:qiXsqEeLLhdLgUIyfr5ot+N/dGPWALmtM1SetRmbUlY= github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.23.5 h1:xK6C4udTyDMd82RFvNkDQxtAd00xlzFUtX4fF2nMZyg= -github.com/aws/aws-sdk-go-v2 v1.23.5/go.mod h1:t3szzKfP0NeRU27uBFczDivYJjsmSnqI8kIvKyWb9ds= +github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= +github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4= -github.com/aws/aws-sdk-go-v2/config v1.25.11 h1:RWzp7jhPRliIcACefGkKp03L0Yofmd2p8M25kbiyvno= -github.com/aws/aws-sdk-go-v2/config v1.25.11/go.mod h1:BVUs0chMdygHsQtvaMyEOpW2GIW+ubrxJLgIz/JU29s= +github.com/aws/aws-sdk-go-v2/config v1.26.1 h1:z6DqMxclFGL3Zfo+4Q0rLnAZ6yVkzCRxhRMsiRQnD1o= +github.com/aws/aws-sdk-go-v2/config v1.26.1/go.mod h1:ZB+CuKHRbb5v5F0oJtGdhFTelmrxd4iWO1lf0rQwSAg= github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o= -github.com/aws/aws-sdk-go-v2/credentials v1.16.9 h1:LQo3MUIOzod9JdUK+wxmSdgzLVYUbII3jXn3S/HJZU0= -github.com/aws/aws-sdk-go-v2/credentials v1.16.9/go.mod h1:R7mDuIJoCjH6TxGUc/cylE7Lp/o0bhKVoxdBThsjqCM= +github.com/aws/aws-sdk-go-v2/credentials v1.16.12 h1:v/WgB8NxprNvr5inKIiVVrXPuuTegM+K8nncFkr1usU= +github.com/aws/aws-sdk-go-v2/credentials v1.16.12/go.mod h1:X21k0FjEJe+/pauud82HYiQbEr9jRKY3kXEIQ4hXeTQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 h1:FZVFahMyZle6WcogZCOxo6D/lkDA2lqKIn4/ueUmVXw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9/go.mod h1:kjq7REMIkxdtcEC9/4BVXjOsNY5isz6jQbEgk6osRTU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw= 
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 h1:8GVZIR0y6JRIUNSYI1xAMF4HDfV8H/bOsZ/8AD/uY5Q= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8/go.mod h1:rwBfu0SoUkBUZndVgPZKAD9Y2JigaZtRP68unRiYToQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 h1:v+HbZaCGmOwnTTVS86Fleq0vPzOd7tnJGbFhP0stNLs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9/go.mod h1:Xjqy+Nyj7VDLBtCMkQYOw1QYfAEZCVLrfI0ezve8wd4= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 h1:ZE2ds/qeBkhk3yqYvS3CDCFNvd9ir5hMjlVStLZWrvM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8/go.mod h1:/lAPPymDYL023+TS6DJmjuL42nxix2AvEvfjqOBRODk= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 h1:N94sVhRACtXyVcjXxrwK1SKFIJrA9pOJ5yu2eSHnmls= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9/go.mod h1:hqamLz7g1/4EJP+GH5NBhcUMLjW+gKLQabgyz6/7WAU= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= github.com/aws/aws-sdk-go-v2/service/ecr v1.18.11 h1:wlTgmb/sCmVRJrN5De3CiHj4v/bTCgL5+qpdEd0CPtw= github.com/aws/aws-sdk-go-v2/service/ecr v1.18.11/go.mod h1:Ce1q2jlNm8BVpjLaOnwnm5v2RClAbK6txwPljFzyW6c= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.16.2 h1:yflJrGmi1pXtP9lOpOeaNZyc0vXnJTuP2sor3nJcGGo= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.16.2/go.mod h1:uHtRE7aqXNmpeYL+7Ec7LacH5zC9+w2T5MBOeEKDdu0= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 h1:e3PCNeEaev/ZF01cQyNZgmYE9oYYePIMJs2mWSKG514= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3/go.mod h1:gIeeNyaL8tIEqZrzAnTeyhHcE0yysCtcaP+N9kxLZ+E= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8 h1:EamsKe+ZjkOQjDdHd86/JCEucjFKQ9T0atWKO4s2Lgs= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8/go.mod h1:Q0vV3/csTpbkfKLI5Sb56cJQTCTtJ0ixdb7P+Wedqiw= -github.com/aws/aws-sdk-go-v2/service/kms v1.27.2 h1:I0NiSQiZu1UzP0akJWXSacjckEpYdN4VN7XYYfW6EYs= -github.com/aws/aws-sdk-go-v2/service/kms v1.27.2/go.mod h1:E2IzqbIZfYuYUgib2KxlaweBbkxHCb3ZIgnp85TjKic= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 h1:Nf2sHxjMJR8CSImIVCONRi4g0Su3J+TSTbS7G0pUeMU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9/go.mod h1:idky4TER38YIjr2cADF1/ugFMKvZV7p//pVeV5LZbF0= +github.com/aws/aws-sdk-go-v2/service/kms v1.27.6 h1:zzaFokMF7UVk22/Igtb93A1ReGP50uu99ldLWaEMfHc= +github.com/aws/aws-sdk-go-v2/service/kms v1.27.6/go.mod h1:D9FVDkZjkZnnFHymJ3fPVz0zOUlNSd0xcIIVmmrAac8= 
github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.2 h1:xJPydhNm0Hiqct5TVKEuHG7weC0+sOs4MUnd7A5n5F4= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.2/go.mod h1:zxk6y1X2KXThESWMS5CrKRvISD8mbIMab6nZrCGxDG0= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 h1:ldSFWz9tEHAwHNmjx2Cvy1MjP5/L9kNoR0skc6wyOOM= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.5/go.mod h1:CaFfXLYL376jgbP7VKC96uFcU8Rlavak0UlAwk1Dlhc= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2 h1:8dU9zqA77C5egbU6yd4hFLaiIdPv3rU+6cp7sz5FjCU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2/go.mod h1:7Lt5mjQ8x5rVdKqg+sKKDeuwoszDJIIPmkd8BVsEdS0= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 h1:2k9KmFawS63euAkY4/ixVNsYYwrwnd5fIvgEKkfZFNM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5/go.mod h1:W+nd4wWDVkSUIox9bacmkBP5NMFQeTJ/xqNabpzSR38= github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.2 h1:fFrLsy08wEbAisqW3KDl/cPHrF43GmV79zXB9EwJiZw= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.2/go.mod h1:7Ld9eTqocTvJqqJ5K/orbSDwmGcpRdlDiLjz2DO+SL8= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 h1:5UYvv8JUvllZsRnfrcMQ+hJ9jNICmcgKPAO1CER25Wg= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.5/go.mod h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.18.1 h1:pOdBTUfXNazOlxLrgeYalVnuTpKreACHtc62xLwIB3c= -github.com/aws/smithy-go v1.18.1/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= +github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 h1:G5IT+PEpFY0CDb3oITDP9tkmLrHkVD8Ny+elUmBqVYI= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7/go.mod h1:VVALgT1UESBh91dY0GprHnT1Z7mKd96VDk8qVy+bmu0= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -231,7 +231,6 @@ github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMr github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= @@ -258,6 +257,7 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= @@ -438,17 +438,17 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= -github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg= -github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= @@ -479,8 +479,8 @@ github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxF github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod 
h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -491,8 +491,8 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= @@ -718,8 +718,6 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/vault/api v1.10.0 h1:/US7sIjWN6Imp4o/Rj1Ce2Nr5bki/AXi9vAW3p2tOJQ= github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= -github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc= -github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -739,8 +737,8 @@ github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/U github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= -github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= +github.com/jellydator/ttlcache/v3 v3.1.1 h1:RCgYJqo3jgvhl+fEWvjNW8thxGWsgxi+TPhRir1Y9y8= +github.com/jellydator/ttlcache/v3 v3.1.1/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= github.com/jenkins-x/go-scm v1.14.24 h1:m6btHeg/DUXNJeRa4ySAgwmwCEY7s1OjSkdK6Y1DFKI= github.com/jenkins-x/go-scm v1.14.24/go.mod h1:lKqSATMN9YB1Dbl8s8tzaUf5AFlAd+JRFipu2M6Ut2Y= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -750,7 +748,7 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= github.com/jonboulle/clockwork 
v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -799,8 +797,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf h1:ndns1qx/5dL43g16EQkPV/i8+b3l5bYQwLeoSBe7tS8= -github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA= +github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e h1:RLTpX495BXToqxpM90Ws4hXEo4Wfh81jr9DX1n/4WOo= +github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e/go.mod h1:EAuqr9VFWxBi9nD5jc/EA2MT1RFty9288TF6zdtYoCU= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -962,16 +960,15 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -980,9 +977,8 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -994,9 +990,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8= github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= @@ -1018,8 +1013,8 @@ github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiB github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= -github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= +github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= +github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -1030,14 +1025,14 @@ github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f/go.mod h1:AuYgA5K 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48= github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.7.6 h1:WzZExOcFanrFfCi7SUgkBtJicWnSNziBD9nSSQIrqhc= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.7.6/go.mod h1:3zOHOLHnCE6EXyVH+6Z/lC9O1RDsbmR045NQ1DogiHw= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.7.6 h1:wsPt9kNXF1ZZyae2wO35NLsK+cjWqPGpuPaDdXzRe0g= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.7.6/go.mod h1:LH+ct6D77J8Ks6PXijMYYhmlQ1mbqKHbmy7+Sw5/Woc= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.6 h1:aMVT9XXFQEnBtJ6szzanyAdKT5gFK4emN+jLSlFlOso= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.6/go.mod h1:Hwhlx8JSZJF1R27JlwW/Bl2h40reG3MfKANREtBI0L8= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.6 h1:TdSHzcFtPJxbk4B+huWC6GDq7OpgHmLg18inRo9u70I= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.6/go.mod h1:/l/PzSbTOuIAtglOwUdlzzYvjIZ2WyaBpt5722JTmLY= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.0 h1:nLaaOX85YjBKQOQHWY2UlDkbx+je8ozTEM+t1ySAb78= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.0/go.mod h1:fLxrKqPP9lIz/B3UBD4ZK6j6984eX2czu/0zxm99fkE= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.0 h1:Txd7Fjei2NVb/sjBNYybrl+FcZGptO6FXXH4pVNBQMs= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.0/go.mod h1:mZjoLdfxFzo61abWNQisk8BcUbGshTO5HCpPRjzuPUs= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.0 h1:vQKLGL2H3L6AWnTddmF4TPKKNAM6GX1CtLsvIhCtjOw= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.0/go.mod h1:eaY3HCZUSNzqfkGsvkHSCkBlTQIQ4Sym9po09fAJw5w= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.0 h1:PspwJqJtD4bo0Aboo2UBrvznNUK7ETjD270GD9WLI88= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.0/go.mod h1:8ta2z6+ZsN8o3EdxGgpSn6VCAkTqLztV0L4YnLCwrwU= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1079,8 +1074,8 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/spiffe/go-spiffe/v2 v2.1.5 h1:nFzp6pllCxpso6A2CaokdjhmH3uHWMNL9DGYXeZrShs= github.com/spiffe/go-spiffe/v2 v2.1.5/go.mod h1:eVDqm9xFvyqao6C+eQensb9ZPkyNEeaUbqbBpOhBnNk= -github.com/spiffe/spire-api-sdk v1.8.5 h1:DjYWO2muHvhwOBOTz/0zTGiBwJkofX/1V9mUAI+P4tU= -github.com/spiffe/spire-api-sdk v1.8.5/go.mod h1:4uuhFlN6KBWjACRP3xXwrOTNnvaLp1zJs8Lribtr4fI= +github.com/spiffe/spire-api-sdk v1.8.7 h1:LzKqts7VziON0/din8BV4gjtUSIZqMPgL7eljZm6cWk= +github.com/spiffe/spire-api-sdk v1.8.7/go.mod h1:4uuhFlN6KBWjACRP3xXwrOTNnvaLp1zJs8Lribtr4fI= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= @@ -1135,8 +1130,6 @@ github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:tw github.com/vishvananda/netns 
v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= -github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= @@ -1180,16 +1173,20 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1250,8 +1247,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1475,7 +1472,6 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1617,8 +1613,8 @@ google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBz google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.152.0 h1:t0r1vPnfMc260S2Ci+en7kfCZaLOPs5KI0sVV/6jZrY= -google.golang.org/api v0.152.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= +google.golang.org/api v0.154.0 h1:X7QkVKZBskztmpPKWQXgjJRPA2dJYrL6r+sYPRLj050= +google.golang.org/api v0.154.0/go.mod h1:qhSMkM85hgqiokIYsrRyKxrjfBeIhgl4Z2JmeRkYylc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1677,12 +1673,12 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= -google.golang.org/genproto/googleapis/api 
v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f h1:2yNACc1O40tTnrsbk9Cv6oxiW8pxI/pXj0wRtdlYmgY= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f/go.mod h1:Uy9bTZJqmfrw2rIBxgGLnamc78euZULUBrLZ9XTITKI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1734,7 +1730,6 @@ google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1746,6 +1741,8 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U= +gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index aa30abf3739..5c8411cb575 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,15 @@ # Release History +## 1.9.1 (2023-12-11) + +### Bugs Fixed + +* The `retry-after-ms` and `x-ms-retry-after-ms` headers weren't being checked 
during retries. + +### Other Changes + +* Update dependencies. + ## 1.9.0 (2023-11-06) ### Breaking Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 272f06155ea..bb93daee681 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -22,11 +22,13 @@ const ( HeaderLocation = "Location" HeaderOperationLocation = "Operation-Location" HeaderRetryAfter = "Retry-After" + HeaderRetryAfterMS = "Retry-After-Ms" HeaderUserAgent = "User-Agent" HeaderWWWAuthenticate = "WWW-Authenticate" HeaderXMSClientRequestID = "x-ms-client-request-id" HeaderXMSRequestID = "x-ms-request-id" HeaderXMSErrorCode = "x-ms-error-code" + HeaderXMSRetryAfterMS = "x-ms-retry-after-ms" ) const BearerTokenPrefix = "Bearer " @@ -38,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.9.0" + Version = "v1.9.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go index 16bc105f481..d3da2c5fdfa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -44,22 +44,64 @@ func Delay(ctx context.Context, delay time.Duration) error { } } -// RetryAfter returns non-zero if the response contains a Retry-After header value. +// RetryAfter returns non-zero if the response contains one of the headers with a "retry after" value. +// Headers are checked in the following order: retry-after-ms, x-ms-retry-after-ms, retry-after func RetryAfter(resp *http.Response) time.Duration { if resp == nil { return 0 } - ra := resp.Header.Get(HeaderRetryAfter) - if ra == "" { - return 0 + + type retryData struct { + header string + units time.Duration + + // custom is used when the regular algorithm failed and is optional. + // the returned duration is used verbatim (units is not applied). 
+ custom func(string) time.Duration } - // retry-after values are expressed in either number of - // seconds or an HTTP-date indicating when to try again - if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { - return time.Duration(retryAfter) * time.Second - } else if t, err := time.Parse(time.RFC1123, ra); err == nil { - return time.Until(t) + + nop := func(string) time.Duration { return 0 } + + // the headers are listed in order of preference + retries := []retryData{ + { + header: HeaderRetryAfterMS, + units: time.Millisecond, + custom: nop, + }, + { + header: HeaderXMSRetryAfterMS, + units: time.Millisecond, + custom: nop, + }, + { + header: HeaderRetryAfter, + units: time.Second, + + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + custom: func(ra string) time.Duration { + t, err := time.Parse(time.RFC1123, ra) + if err != nil { + return 0 + } + return time.Until(t) + }, + }, } + + for _, retry := range retries { + v := resp.Header.Get(retry.header) + if v == "" { + continue + } + if retryAfter, _ := strconv.Atoi(v); retryAfter > 0 { + return time.Duration(retryAfter) * retry.units + } else if d := retry.custom(v); d > 0 { + return d + } + } + return 0 } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index b361c13867c..1ee54cfe0f6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -150,6 +150,18 @@ type Config struct { // BaseEndpoint is an intermediary transfer location to a service specific // BaseEndpoint on a service's Options. BaseEndpoint *string + + // DisableRequestCompression toggles if an operation request could be + // compressed or not. Will be set to false by default. This variable is sourced from + // environment variable AWS_DISABLE_REQUEST_COMPRESSION or the shared config profile attribute + // disable_request_compression + DisableRequestCompression bool + + // RequestMinCompressSizeBytes sets the inclusive min bytes of a request body that could be + // compressed. Will be set to 10240 by default and must be within 0 and 10485760 bytes inclusively. + // This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or + // the shared config profile attribute request_min_compression_size_bytes + RequestMinCompressSizeBytes int64 } // NewConfig returns a new Config pointer that can be chained with builder diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index c760c48e3dc..9a844f30e51 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.23.5" +const goModuleVersion = "1.24.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index dbe24d3e125..79eae3632e8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,17 @@ +# v1.26.1 (2023-12-08) + +* **Bug Fix**: Correct loading of [services *] sections into shared config. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.0 (2023-12-07) + +* **Feature**: Support modeled request compression. 
The only algorithm supported at this time is `gzip`. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.12 (2023-12-06) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.25.11 (2023-12-01) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go index dfe62973221..50582d89d54 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -70,10 +70,16 @@ var defaultAWSConfigResolvers = []awsConfigResolver{ // httpBearerAuth authentication scheme. resolveBearerAuthToken, - // Sets the sdk app ID if present in shared config profile + // Sets the sdk app ID if present in env var or shared config profile resolveAppID, resolveBaseEndpoint, + + // Sets the DisableRequestCompression if present in env var or shared config profile + resolveDisableRequestCompression, + + // Sets the RequestMinCompressSizeBytes if present in env var or shared config profile + resolveRequestMinCompressSizeBytes, } // A Config represents a generic configuration value or set of values. This type diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go index fb0615aeb42..88550198cce 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go @@ -12,6 +12,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression" ) // CredentialsSourceName provides a name of the provider when config is @@ -75,6 +76,9 @@ const ( awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS" awsEndpointURL = "AWS_ENDPOINT_URL" + awsDisableRequestCompression = "AWS_DISABLE_REQUEST_COMPRESSION" + awsRequestMinCompressionSizeBytes = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES" + awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH" ) @@ -271,6 +275,15 @@ type EnvConfig struct { // corresponding endpoint resolution field. BaseEndpoint string + // determine if request compression is allowed, default to false + // retrieved from env var AWS_DISABLE_REQUEST_COMPRESSION + DisableRequestCompression *bool + + // inclusive threshold request body size to trigger compression, + // default to 10240 and must be within 0 and 10485760 bytes inclusive + // retrieved from env var AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES + RequestMinCompressSizeBytes *int64 + // Whether S3Express auth is disabled. 
// // This will NOT prevent requests from being made to S3Express buckets, it @@ -319,6 +332,13 @@ func NewEnvConfig() (EnvConfig, error) { cfg.AppID = os.Getenv(awsSdkAppID) + if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompression}); err != nil { + return cfg, err + } + if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytes}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil { + return cfg, err + } + if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil { return cfg, err } @@ -383,6 +403,20 @@ func (c EnvConfig) getAppID(context.Context) (string, bool, error) { return c.AppID, len(c.AppID) > 0, nil } +func (c EnvConfig) getDisableRequestCompression(context.Context) (bool, bool, error) { + if c.DisableRequestCompression == nil { + return false, false, nil + } + return *c.DisableRequestCompression, true, nil +} + +func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) { + if c.RequestMinCompressSizeBytes == nil { + return 0, false, nil + } + return *c.RequestMinCompressSizeBytes, true, nil +} + // GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified, // and not 0. func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { @@ -639,6 +673,30 @@ func setBoolPtrFromEnvVal(dst **bool, keys []string) error { return nil } +func setInt64PtrFromEnvVal(dst **int64, keys []string, max int64) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return fmt.Errorf("invalid value for env var, %s=%s, need int64", k, value) + } else if v < 0 || v > max { + return fmt.Errorf("invalid range for env var min request compression size bytes %q, must be within 0 and 10485760 inclusively", v) + } + if *dst == nil { + *dst = new(int64) + } + + **dst = v + break + } + + return nil +} + func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error { for _, k := range keys { value := os.Getenv(k) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 50962ab387f..b7c325d3eac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.25.11" +const goModuleVersion = "1.26.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go index 25c05a5536b..06596c1b7c8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go @@ -207,6 +207,12 @@ type LoadOptions struct { // The sdk app ID retrieved from env var or shared config to be added to request user agent header AppID string + // Specifies whether an operation request could be compressed + DisableRequestCompression *bool + + // The inclusive min bytes of a request body that could be compressed + RequestMinCompressSizeBytes *int64 + // Whether S3 Express auth is disabled. 
S3DisableExpressAuth *bool } @@ -256,6 +262,22 @@ func (o LoadOptions) getAppID(ctx context.Context) (string, bool, error) { return o.AppID, len(o.AppID) > 0, nil } +// getDisableRequestCompression returns DisableRequestCompression from config's LoadOptions +func (o LoadOptions) getDisableRequestCompression(ctx context.Context) (bool, bool, error) { + if o.DisableRequestCompression == nil { + return false, false, nil + } + return *o.DisableRequestCompression, true, nil +} + +// getRequestMinCompressSizeBytes returns RequestMinCompressSizeBytes from config's LoadOptions +func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { + if o.RequestMinCompressSizeBytes == nil { + return 0, false, nil + } + return *o.RequestMinCompressSizeBytes, true, nil +} + // WithRegion is a helper function to construct functional options // that sets Region on config's LoadOptions. Setting the region to // an empty string, will result in the region value being ignored. @@ -277,6 +299,30 @@ func WithAppID(ID string) LoadOptionsFunc { } } +// WithDisableRequestCompression is a helper function to construct functional options +// that sets DisableRequestCompression on config's LoadOptions. +func WithDisableRequestCompression(DisableRequestCompression *bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + if DisableRequestCompression == nil { + return nil + } + o.DisableRequestCompression = DisableRequestCompression + return nil + } +} + +// WithRequestMinCompressSizeBytes is a helper function to construct functional options +// that sets RequestMinCompressSizeBytes on config's LoadOptions. +func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOptionsFunc { + return func(o *LoadOptions) error { + if RequestMinCompressSizeBytes == nil { + return nil + } + o.RequestMinCompressSizeBytes = RequestMinCompressSizeBytes + return nil + } +} + // getDefaultRegion returns DefaultRegion from config's LoadOptions func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) { if len(o.DefaultRegion) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go index d5235846011..13745fc98fd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go @@ -191,6 +191,40 @@ func getAppID(ctx context.Context, configs configs) (value string, found bool, e return } +// disableRequestCompressionProvider provides access to the DisableRequestCompression +type disableRequestCompressionProvider interface { + getDisableRequestCompression(context.Context) (bool, bool, error) +} + +func getDisableRequestCompression(ctx context.Context, configs configs) (value bool, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(disableRequestCompressionProvider); ok { + value, found, err = p.getDisableRequestCompression(ctx) + if err != nil || found { + break + } + } + } + return +} + +// requestMinCompressSizeBytesProvider provides access to the MinCompressSizeBytes +type requestMinCompressSizeBytesProvider interface { + getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) +} + +func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value int64, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(requestMinCompressSizeBytesProvider); ok { + value, found, err = p.getRequestMinCompressSizeBytes(ctx) + if err != nil || found { + break + } + } + } 
+ return +} + // ec2IMDSRegionProvider provides access to the ec2 imds region // configuration value type ec2IMDSRegionProvider interface { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go index b3b2c93cdc4..fde2e3980e0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go @@ -139,6 +139,33 @@ func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error { return nil } +// resolveDisableRequestCompression extracts the DisableRequestCompression from the configs slice's +// SharedConfig or EnvConfig +func resolveDisableRequestCompression(ctx context.Context, cfg *aws.Config, configs configs) error { + disable, _, err := getDisableRequestCompression(ctx, configs) + if err != nil { + return err + } + + cfg.DisableRequestCompression = disable + return nil +} + +// resolveRequestMinCompressSizeBytes extracts the RequestMinCompressSizeBytes from the configs slice's +// SharedConfig or EnvConfig +func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, configs configs) error { + minBytes, found, err := getRequestMinCompressSizeBytes(ctx, configs) + if err != nil { + return err + } + // must set a default min size 10240 if not configured + if !found { + minBytes = 10240 + } + cfg.RequestMinCompressSizeBytes = minBytes + return nil +} + // resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default // region if region had not been resolved from other sources. func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go index 823eafe12b8..c546cb7d0f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go @@ -17,6 +17,7 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/ini" "github.com/aws/aws-sdk-go-v2/internal/shareddefaults" "github.com/aws/smithy-go/logging" + smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression" ) const ( @@ -30,7 +31,7 @@ const ( // Prefix for services section. It is referenced in profile via the services // parameter to configure clients for service-specific parameters. - servicesPrefix = `services` + servicesPrefix = `services ` // string equivalent for boolean endpointDiscoveryDisabled = `false` @@ -108,6 +109,11 @@ const ( endpointURL = "endpoint_url" + servicesSectionKey = "services" + + disableRequestCompression = "disable_request_compression" + requestMinCompressionSizeBytes = "request_min_compression_size_bytes" + s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth" ) @@ -316,8 +322,18 @@ type SharedConfig struct { // corresponding endpoint resolution field. BaseEndpoint string - // Value to contain services section content. - Services Services + // Services section config. 
+ ServicesSectionName string + Services Services + + // determine if request compression is allowed, default to false + // retrieved from config file's profile field disable_request_compression + DisableRequestCompression *bool + + // inclusive threshold request body size to trigger compression, + // default to 10240 and must be within 0 and 10485760 bytes inclusive + // retrieved from config file's profile field request_min_compression_size_bytes + RequestMinCompressSizeBytes *int64 // Whether S3Express auth is disabled. // @@ -994,14 +1010,11 @@ func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile c.SSOSession = &ssoSession } - for _, sectionName := range sections.List() { - if strings.HasPrefix(sectionName, servicesPrefix) { - section, ok := sections.GetSection(sectionName) - if ok { - var svcs Services - svcs.setFromIniSection(section) - c.Services = svcs - } + if len(c.ServicesSectionName) > 0 { + if section, ok := sections.GetSection(servicesPrefix + c.ServicesSectionName); ok { + var svcs Services + svcs.setFromIniSection(section) + c.Services = svcs } } @@ -1104,6 +1117,13 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er updateString(&c.BaseEndpoint, section, endpointURL) + if err := updateDisableRequestCompression(&c.DisableRequestCompression, section, disableRequestCompression); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", disableRequestCompression, err) + } + if err := updateRequestMinCompressSizeBytes(&c.RequestMinCompressSizeBytes, section, requestMinCompressionSizeBytes); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err) + } + // Shared Credentials creds := aws.Credentials{ AccessKeyID: section.String(accessKeyIDKey), @@ -1116,9 +1136,61 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er c.Credentials = creds } + updateString(&c.ServicesSectionName, section, servicesSectionKey) + + return nil +} + +func updateRequestMinCompressSizeBytes(bytes **int64, sec ini.Section, key string) error { + if !sec.Has(key) { + return nil + } + + v, ok := sec.Int(key) + if !ok { + return fmt.Errorf("invalid value for min request compression size bytes %s, need int64", sec.String(key)) + } + if v < 0 || v > smithyrequestcompression.MaxRequestMinCompressSizeBytes { + return fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", v) + } + *bytes = new(int64) + **bytes = v return nil } +func updateDisableRequestCompression(disable **bool, sec ini.Section, key string) error { + if !sec.Has(key) { + return nil + } + + v := sec.String(key) + switch { + case v == "true": + *disable = new(bool) + **disable = true + case v == "false": + *disable = new(bool) + **disable = false + default: + return fmt.Errorf("invalid value for shared config profile field, %s=%s, need true or false", key, v) + } + return nil +} + +func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { + if c.RequestMinCompressSizeBytes == nil { + return 0, false, nil + } + return *c.RequestMinCompressSizeBytes, true, nil +} + +func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, bool, error) { + if c.DisableRequestCompression == nil { + return false, false, nil + } + return *c.DisableRequestCompression, true, nil +} + func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error { if !section.Has(key) { 
return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 529591f9c02..dd7af71d18c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.16.12 (2023-12-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.11 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2023-12-06) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.9 (2023-12-01) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index 16aacea8d22..ec3eb5f6e8c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.9" +const goModuleVersion = "1.16.12" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index 3381934716f..eef77e9d5bc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.14.10 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.14.9 (2023-12-01) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index 9979479a91a..ce3e3111861 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.14.9" +const goModuleVersion = "1.14.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index 8ad41847f8b..5ceb3b82f47 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.2.9 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.2.8 (2023-12-01) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index 44c5156ac2a..da7d0d813ef 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.2.8" +const goModuleVersion = "1.2.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go 
index 7ea49d4ea40..849beffd7da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -11,7 +11,7 @@ func GetPartition(region string) *PartitionConfig { var partitions = []Partition{ { ID: "aws", - RegionRegex: "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ Name: "aws", DnsSuffix: "amazonaws.com", @@ -90,6 +90,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-southeast-4": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "aws-global": { Name: nil, DnsSuffix: nil, @@ -160,6 +167,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "il-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "me-central-1": { Name: nil, DnsSuffix: nil, @@ -340,4 +354,28 @@ var partitions = []Partition{ }, }, }, + { + ID: "aws-iso-e", + RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-iso-e", + DnsSuffix: "cloud.adc-e.uk", + DualStackDnsSuffix: "cloud.adc-e.uk", + SupportsFIPS: true, + SupportsDualStack: false, + }, + Regions: map[string]RegionOverrides{}, + }, + { + ID: "aws-iso-f", + RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-iso-f", + DnsSuffix: "csp.hci.ic.gov", + DualStackDnsSuffix: "csp.hci.ic.gov", + SupportsFIPS: true, + SupportsDualStack: false, + }, + Regions: map[string]RegionOverrides{}, + }, } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 172f6228638..761cc992b8e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,7 @@ +# v2.5.9 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.5.8 (2023-12-01) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 3f2d7085307..caabf668d49 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.5.8" +const goModuleVersion = "2.5.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index 149f7f716d4..adbbf4adcf4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.7.2 (2023-12-08) + +* **Bug Fix**: Correct loading of [services *] sections into shared config. + # v1.7.1 (2023-11-16) * **Bug Fix**: Fix recognition of trailing comments in shared config properties. # or ; separators that aren't preceded by whitespace at the end of a property value should be considered part of it. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index 5080ebe68f5..f0673f3a024 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.7.1" +const goModuleVersion = "1.7.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go index ade75bf34e4..e3706b3c31b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go @@ -54,18 +54,7 @@ func (v Value) String() string { // MapValue returns a map value for sub properties func (v Value) MapValue() map[string]string { - newlineParts := strings.Split(string(v.str), "\n") - mp := make(map[string]string) - for _, part := range newlineParts { - operandParts := strings.Split(part, "=") - if len(operandParts) < 2 { - continue - } - key := strings.TrimSpace(operandParts[0]) - val := strings.TrimSpace(operandParts[1]) - mp[key] = val - } - return mp + return v.mp } // IntValue returns an integer value diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index c2a206b03b9..c3525fd2298 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.10.4 (2023-12-07) + +* No change notes available for this release. + # v1.10.3 (2023-11-30) * No change notes available for this release. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index 4a245da2962..cc6384005aa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.10.3" +const goModuleVersion = "1.10.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 5e433862c3b..1191b30c69b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.10.9 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.10.8 (2023-12-01) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index 27415898668..aacb4dd2485 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.10.8" +const goModuleVersion = "1.10.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md index 55efccf9bf4..b4242ec3833 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md @@ -1,3 +1,19 @@ +# v1.27.6 (2023-12-15) + +* **Documentation**: Documentation updates for AWS Key Management Service + +# v1.27.5 (2023-12-08) + +* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. + +# v1.27.4 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.3 (2023-12-06) + +* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. + # v1.27.2 (2023-12-01) * **Bug Fix**: Correct wrapping of errors in authentication workflow. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go index d756de129f3..d1af622670f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go @@ -43,6 +43,8 @@ func New(options Options, optFns ...func(*Options)) *Client { setResolvedDefaultsMode(&options) + resolveRetryer(&options) + resolveHTTPClient(&options) resolveHTTPSignerV4(&options) @@ -55,10 +57,12 @@ func New(options Options, optFns ...func(*Options)) *Client { fn(&options) } - resolveRetryer(&options) + finalizeRetryMaxAttempts(&options) ignoreAnonymousAuth(&options) + wrapWithAnonymousAuth(&options) + resolveAuthSchemes(&options) client := &Client{ @@ -86,7 +90,7 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf fn(&options) } - finalizeRetryMaxAttemptOptions(&options, *c) + finalizeOperationRetryMaxAttempts(&options, *c) finalizeClientEndpointResolverOptions(&options) @@ -334,7 +338,15 @@ func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { o.RetryMaxAttempts = cfg.RetryMaxAttempts } -func finalizeRetryMaxAttemptOptions(o *Options, client Client) { +func finalizeRetryMaxAttempts(o *Options) { + if o.RetryMaxAttempts == 0 { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func finalizeOperationRetryMaxAttempts(o *Options, client Client) { if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { return } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go index 3cc5e2acfd2..392786c3c76 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go @@ -21,7 +21,10 @@ import ( // the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. // Required permissions: kms:CancelKeyDeletion (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) Related operations: ScheduleKeyDeletion +// (key policy) Related operations: ScheduleKeyDeletion Eventual consistency: The +// KMS API follows an eventual consistency model. For more information, see KMS +// eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) CancelKeyDeletion(ctx context.Context, params *CancelKeyDeletionInput, optFns ...func(*Options)) (*CancelKeyDeletionOutput, error) { if params == nil { params = &CancelKeyDeletionInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go index 5112a2dc299..a8f6feae3f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go @@ -67,6 +67,10 @@ import ( // - DescribeCustomKeyStores // - DisconnectCustomKeyStore // - UpdateCustomKeyStore +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . 
func (c *Client) ConnectCustomKeyStore(ctx context.Context, params *ConnectCustomKeyStoreInput, optFns ...func(*Options)) (*ConnectCustomKeyStoreOutput, error) { if params == nil { params = &ConnectCustomKeyStoreInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go index a5d2a5872af..53b7ca69a77 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go @@ -42,6 +42,10 @@ import ( // - DeleteAlias // - ListAliases // - UpdateAlias +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) CreateAlias(ctx context.Context, params *CreateAliasInput, optFns ...func(*Options)) (*CreateAliasOutput, error) { if params == nil { params = &CreateAliasInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go index 5c9b2e28e54..1fdda176dde 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go @@ -69,6 +69,10 @@ import ( // - DescribeCustomKeyStores // - DisconnectCustomKeyStore // - UpdateCustomKeyStore +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) CreateCustomKeyStore(ctx context.Context, params *CreateCustomKeyStoreInput, optFns ...func(*Options)) (*CreateCustomKeyStoreOutput, error) { if params == nil { params = &CreateCustomKeyStoreInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go index e1c94b06b04..94f93a63c47 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go @@ -47,6 +47,10 @@ import ( // - ListRetirableGrants // - RetireGrant // - RevokeGrant +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) CreateGrant(ctx context.Context, params *CreateGrantInput, optFns ...func(*Options)) (*CreateGrantOutput, error) { if params == nil { params = &CreateGrantInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go index a022d25b763..765f7f0f623 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go @@ -131,6 +131,10 @@ import ( // - DescribeKey // - ListKeys // - ScheduleKeyDeletion +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . 
func (c *Client) CreateKey(ctx context.Context, params *CreateKeyInput, optFns ...func(*Options)) (*CreateKeyOutput, error) { if params == nil { params = &CreateKeyInput{} @@ -154,7 +158,8 @@ type CreateKeyInput struct { // information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key) // in the Key Management Service Developer Guide. Use this parameter only when you // intend to prevent the principal that is making the request from making a - // subsequent PutKeyPolicy request on the KMS key. + // subsequent PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html) + // request on the KMS key. BypassPolicyLockoutSafetyCheck bool // Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go index eb7f88e7b43..0c347dc7e46 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go @@ -55,21 +55,25 @@ import ( // or any Amazon Web Services SDK. Use the Recipient parameter to provide the // attestation document for the enclave. Instead of the plaintext data, the // response includes the plaintext data encrypted with the public key from the -// attestation document ( CiphertextForRecipient ).For information about the +// attestation document ( CiphertextForRecipient ). For information about the // interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon // Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) -// in the Key Management Service Developer Guide.. The KMS key that you use for -// this operation must be in a compatible key state. For details, see Key states -// of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. Cross-account use: Yes. If you -// use the KeyId parameter to identify a KMS key in a different Amazon Web -// Services account, specify the key ARN or the alias ARN of the KMS key. Required +// in the Key Management Service Developer Guide. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: Yes. If you use +// the KeyId parameter to identify a KMS key in a different Amazon Web Services +// account, specify the key ARN or the alias ARN of the KMS key. Required // permissions: kms:Decrypt (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: // - Encrypt // - GenerateDataKey // - GenerateDataKeyPair // - ReEncrypt +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . 
func (c *Client) Decrypt(ctx context.Context, params *DecryptInput, optFns ...func(*Options)) (*DecryptOutput, error) { if params == nil { params = &DecryptInput{} @@ -146,7 +150,7 @@ type DecryptInput struct { // get the alias name and alias ARN, use ListAliases . KeyId *string - // A signed attestation document (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc) + // A signed attestation document (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-concepts.html#term-attestdoc) // from an Amazon Web Services Nitro enclave and the encryption algorithm to use // with the enclave's public key. The only valid encryption algorithm is // RSAES_OAEP_SHA_256 . This parameter only supports attestation documents for diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go index aa989d54c44..e4f12857790 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go @@ -32,6 +32,10 @@ import ( // - CreateAlias // - ListAliases // - UpdateAlias +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) DeleteAlias(ctx context.Context, params *DeleteAliasInput, optFns ...func(*Options)) (*DeleteAliasOutput, error) { if params == nil { params = &DeleteAliasInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go index 0dc8fae3dd0..3c424c67060 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go @@ -46,6 +46,10 @@ import ( // - DescribeCustomKeyStores // - DisconnectCustomKeyStore // - UpdateCustomKeyStore +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) DeleteCustomKeyStore(ctx context.Context, params *DeleteCustomKeyStoreInput, optFns ...func(*Options)) (*DeleteCustomKeyStoreOutput, error) { if params == nil { params = &DeleteCustomKeyStoreInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go index b8e39caf76f..5fa68070333 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go @@ -26,6 +26,10 @@ import ( // (key policy) Related operations: // - GetParametersForImport // - ImportKeyMaterial +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . 
func (c *Client) DeleteImportedKeyMaterial(ctx context.Context, params *DeleteImportedKeyMaterialInput, optFns ...func(*Options)) (*DeleteImportedKeyMaterialOutput, error) { if params == nil { params = &DeleteImportedKeyMaterialInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go index 0b93c3d30fb..8535fb17748 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go @@ -46,6 +46,10 @@ import ( // - DeleteCustomKeyStore // - DisconnectCustomKeyStore // - UpdateCustomKeyStore +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) DescribeCustomKeyStores(ctx context.Context, params *DescribeCustomKeyStoresInput, optFns ...func(*Options)) (*DescribeCustomKeyStoresOutput, error) { if params == nil { params = &DescribeCustomKeyStoresInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go index a0389d6e2a7..dc6e0bd05ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go @@ -20,12 +20,13 @@ import ( // of the key material. It includes fields, like KeySpec , that help you // distinguish different types of KMS keys. It also displays the key usage // (encryption, signing, or generating and verifying MACs) and the algorithms that -// the KMS key supports. For multi-Region keys , DescribeKey displays the primary -// key and all related replica keys. For KMS keys in CloudHSM key stores , it -// includes information about the key store, such as the key store ID and the -// CloudHSM cluster ID. For KMS keys in external key stores , it includes the -// custom key store ID and the ID of the external key. DescribeKey does not return -// the following information: +// the KMS key supports. For multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) +// , DescribeKey displays the primary key and all related replica keys. For KMS +// keys in CloudHSM key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html) +// , it includes information about the key store, such as the key store ID and the +// CloudHSM cluster ID. For KMS keys in external key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html) +// , it includes the custom key store ID and the ID of the external key. +// DescribeKey does not return the following information: // - Aliases associated with the KMS key. To get this information, use // ListAliases . // - Whether automatic key rotation is enabled on the KMS key. To get this @@ -52,6 +53,10 @@ import ( // - ListKeys // - ListResourceTags // - ListRetirableGrants +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . 
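Editor's note, not part of the vendored diff: the DescribeKey hunk above adds links for multi-Region keys and CloudHSM/external key stores. A small sketch of reading those fields from the response follows; the helper name and output formatting are assumptions.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// describeKey prints the KeyMetadata fields the updated documentation calls out:
// multi-Region configuration and custom key store identifiers.
func describeKey(ctx context.Context, client *kms.Client, keyID string) error {
	out, err := client.DescribeKey(ctx, &kms.DescribeKeyInput{KeyId: aws.String(keyID)})
	if err != nil {
		return err
	}
	md := out.KeyMetadata
	fmt.Println("key ARN:", aws.ToString(md.Arn))
	fmt.Println("key spec:", md.KeySpec)
	fmt.Println("key state:", md.KeyState)
	if md.MultiRegion != nil && *md.MultiRegion {
		fmt.Println("multi-Region key, type:", md.MultiRegionConfiguration.MultiRegionKeyType)
	}
	if md.CustomKeyStoreId != nil {
		fmt.Println("custom key store:", *md.CustomKeyStoreId)
	}
	return nil
}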
func (c *Client) DescribeKey(ctx context.Context, params *DescribeKeyInput, optFns ...func(*Options)) (*DescribeKeyOutput, error) { if params == nil { params = &DescribeKeyInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go index 5da8b601df6..55449b61973 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go @@ -21,7 +21,10 @@ import ( // in the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. // Required permissions: kms:DisableKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) Related operations: EnableKey +// (key policy) Related operations: EnableKey Eventual consistency: The KMS API +// follows an eventual consistency model. For more information, see KMS eventual +// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) DisableKey(ctx context.Context, params *DisableKeyInput, optFns ...func(*Options)) (*DisableKeyOutput, error) { if params == nil { params = &DisableKeyInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go index 171f8d1736b..7cc889e2141 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go @@ -34,6 +34,10 @@ import ( // (key policy) Related operations: // - EnableKeyRotation // - GetKeyRotationStatus +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) DisableKeyRotation(ctx context.Context, params *DisableKeyRotationInput, optFns ...func(*Options)) (*DisableKeyRotationOutput, error) { if params == nil { params = &DisableKeyRotationInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go index 507bf1721d5..a1c62f52635 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go @@ -37,6 +37,10 @@ import ( // - DeleteCustomKeyStore // - DescribeCustomKeyStores // - UpdateCustomKeyStore +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . 
func (c *Client) DisconnectCustomKeyStore(ctx context.Context, params *DisconnectCustomKeyStoreInput, optFns ...func(*Options)) (*DisconnectCustomKeyStoreOutput, error) { if params == nil { params = &DisconnectCustomKeyStoreInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go index cb37ef9c68e..24e90caccba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go @@ -18,7 +18,10 @@ import ( // in the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. // Required permissions: kms:EnableKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) Related operations: DisableKey +// (key policy) Related operations: DisableKey Eventual consistency: The KMS API +// follows an eventual consistency model. For more information, see KMS eventual +// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) EnableKey(ctx context.Context, params *EnableKeyInput, optFns ...func(*Options)) (*EnableKeyOutput, error) { if params == nil { params = &EnableKeyInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go index b5953e228fc..ba2322bbe42 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go @@ -43,6 +43,10 @@ import ( // (key policy) Related operations: // - DisableKeyRotation // - GetKeyRotationStatus +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) EnableKeyRotation(ctx context.Context, params *EnableKeyRotationInput, optFns ...func(*Options)) (*EnableKeyRotationOutput, error) { if params == nil { params = &EnableKeyRotationInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go index c7cb9ef0c36..527dd2ca078 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go @@ -60,6 +60,10 @@ import ( // - Decrypt // - GenerateDataKey // - GenerateDataKeyPair +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . 
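Editor's note, not part of the vendored diff: the Encrypt hunk above only appends the eventual-consistency note; as a usage reference, here is a hedged sketch of a basic Encrypt call. The encryption-context pair is invented for illustration and would have to be supplied again on Decrypt.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// encrypt seals a small secret (at most 4096 bytes) under a symmetric KMS key.
// The encryption context is optional additional authenticated data.
func encrypt(ctx context.Context, client *kms.Client, keyID string, secret []byte) ([]byte, error) {
	out, err := client.Encrypt(ctx, &kms.EncryptInput{
		KeyId:             aws.String(keyID),
		Plaintext:         secret,
		EncryptionContext: map[string]string{"purpose": "example"},
	})
	if err != nil {
		return nil, err
	}
	return out.CiphertextBlob, nil
}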
func (c *Client) Encrypt(ctx context.Context, params *EncryptInput, optFns ...func(*Options)) (*EncryptOutput, error) { if params == nil { params = &EncryptInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go index ba0b66f1826..026f7399812 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go @@ -82,6 +82,10 @@ import ( // - GenerateDataKeyPair // - GenerateDataKeyPairWithoutPlaintext // - GenerateDataKeyWithoutPlaintext +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) GenerateDataKey(ctx context.Context, params *GenerateDataKeyInput, optFns ...func(*Options)) (*GenerateDataKeyOutput, error) { if params == nil { params = &GenerateDataKeyInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go index 8b7e3f0259d..5d6aa26e581 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go @@ -17,7 +17,7 @@ import ( // the private key that is encrypted under the symmetric encryption KMS key you // specify. You can use the data key pair to perform asymmetric cryptography and // implement digital signatures outside of KMS. The bytes in the keys are random; -// they not related to the caller or to the KMS key that is used to encrypt the +// they are not related to the caller or to the KMS key that is used to encrypt the // private key. You can use the public key that GenerateDataKeyPair returns to // encrypt data or verify a signature outside of KMS. Then, store the encrypted // private key with the data. When you are ready to decrypt data or sign a message, @@ -75,6 +75,10 @@ import ( // - GenerateDataKey // - GenerateDataKeyPairWithoutPlaintext // - GenerateDataKeyWithoutPlaintext +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) GenerateDataKeyPair(ctx context.Context, params *GenerateDataKeyPairInput, optFns ...func(*Options)) (*GenerateDataKeyPairOutput, error) { if params == nil { params = &GenerateDataKeyPairInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go index a6e58e4f8c3..ab7a699a6fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go @@ -52,6 +52,10 @@ import ( // - GenerateDataKey // - GenerateDataKeyPair // - GenerateDataKeyWithoutPlaintext +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . 
func (c *Client) GenerateDataKeyPairWithoutPlaintext(ctx context.Context, params *GenerateDataKeyPairWithoutPlaintextInput, optFns ...func(*Options)) (*GenerateDataKeyPairWithoutPlaintextOutput, error) { if params == nil { params = &GenerateDataKeyPairWithoutPlaintextInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go index 0113a566dc7..bfc16ddf606 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go @@ -57,6 +57,10 @@ import ( // - GenerateDataKey // - GenerateDataKeyPair // - GenerateDataKeyPairWithoutPlaintext +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) GenerateDataKeyWithoutPlaintext(ctx context.Context, params *GenerateDataKeyWithoutPlaintextInput, optFns ...func(*Options)) (*GenerateDataKeyWithoutPlaintextOutput, error) { if params == nil { params = &GenerateDataKeyWithoutPlaintextInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go index ad7b6e0b35d..7a28b174e9c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go @@ -33,7 +33,10 @@ import ( // perform this operation with a KMS key in a different Amazon Web Services // account, specify the key ARN or alias ARN in the value of the KeyId parameter. // Required permissions: kms:GenerateMac (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) Related operations: VerifyMac +// (key policy) Related operations: VerifyMac Eventual consistency: The KMS API +// follows an eventual consistency model. For more information, see KMS eventual +// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) GenerateMac(ctx context.Context, params *GenerateMacInput, optFns ...func(*Options)) (*GenerateMacOutput, error) { if params == nil { params = &GenerateMacInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go index 5fe3f3fb10d..42e4e073e5c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go @@ -33,7 +33,9 @@ import ( // Cross-account use: Not applicable. GenerateRandom does not use any // account-specific resources, such as KMS keys. Required permissions: // kms:GenerateRandom (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) +// (IAM policy) Eventual consistency: The KMS API follows an eventual consistency +// model. For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . 
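Editor's note, not part of the vendored diff: GenerateMac and VerifyMac, whose doc comments gain the same eventual-consistency note, are intended to be used as a pair with an HMAC KMS key. A hedged round-trip sketch, with HMAC_SHA_256 chosen for illustration:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// macRoundTrip computes an HMAC over a message with an HMAC KMS key and then
// verifies it with the same key and algorithm.
func macRoundTrip(ctx context.Context, client *kms.Client, hmacKeyID string, message []byte) error {
	gen, err := client.GenerateMac(ctx, &kms.GenerateMacInput{
		KeyId:        aws.String(hmacKeyID),
		Message:      message,
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
	})
	if err != nil {
		return err
	}
	ver, err := client.VerifyMac(ctx, &kms.VerifyMacInput{
		KeyId:        aws.String(hmacKeyID),
		Message:      message,
		Mac:          gen.Mac,
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
	})
	if err != nil {
		return err
	}
	fmt.Println("MAC valid:", ver.MacValid)
	return nil
}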
func (c *Client) GenerateRandom(ctx context.Context, params *GenerateRandomInput, optFns ...func(*Options)) (*GenerateRandomOutput, error) { if params == nil { params = &GenerateRandomInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go index c398993dc77..0302ffd0e76 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go @@ -14,7 +14,10 @@ import ( // Gets a key policy attached to the specified KMS key. Cross-account use: No. You // cannot perform this operation on a KMS key in a different Amazon Web Services // account. Required permissions: kms:GetKeyPolicy (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) Related operations: PutKeyPolicy +// (key policy) Related operations: PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html) +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) GetKeyPolicy(ctx context.Context, params *GetKeyPolicyInput, optFns ...func(*Options)) (*GetKeyPolicyOutput, error) { if params == nil { params = &GetKeyPolicyInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go index b60e1ae0060..018b102ac39 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go @@ -52,6 +52,10 @@ import ( // (key policy) Related operations: // - DisableKeyRotation // - EnableKeyRotation +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) GetKeyRotationStatus(ctx context.Context, params *GetKeyRotationStatusInput, optFns ...func(*Options)) (*GetKeyRotationStatusOutput, error) { if params == nil { params = &GetKeyRotationStatusInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go index 172892085d7..08e06adebbf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go @@ -24,10 +24,12 @@ import ( // EXTERNAL to create a KMS key with no key material. You can import key material // for a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, // or asymmetric signing KMS key. You can also import key material into a -// multi-Region key of any supported type. However, you can't import key material -// into a KMS key in a custom key store . You can also use GetParametersForImport -// to get a public key and import token to reimport the original key material into -// a KMS key whose key material expired or was deleted. GetParametersForImport +// multi-Region key (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) +// of any supported type. 
However, you can't import key material into a KMS key in +// a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// . You can also use GetParametersForImport to get a public key and import token +// to reimport the original key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material) +// into a KMS key whose key material expired or was deleted. GetParametersForImport // returns the items that you need to import your key material. // - The public key (or "wrapping key") of an RSA key pair that KMS generates. // You will use this public key to encrypt ("wrap") your key material while it's in @@ -58,6 +60,10 @@ import ( // (key policy) Related operations: // - ImportKeyMaterial // - DeleteImportedKeyMaterial +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) GetParametersForImport(ctx context.Context, params *GetParametersForImportInput, optFns ...func(*Options)) (*GetParametersForImportOutput, error) { if params == nil { params = &GetParametersForImportInput{} @@ -106,8 +112,8 @@ type GetParametersForImportInput struct { // - RSAES_OAEP_SHA_1 — Supported for all types of key material, except RSA key // material (private key). You cannot use the RSAES_OAEP_SHA_1 wrapping algorithm // with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material. - // - RSAES_PKCS1_V1_5 (Deprecated) — Supported only for symmetric encryption key - // material (and only in legacy mode). + // - RSAES_PKCS1_V1_5 (Deprecated) — As of October 10, 2023, KMS does not + // support the RSAES_PKCS1_V1_5 wrapping algorithm. // // This member is required. WrappingAlgorithm types.AlgorithmSpec diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go index 10091e2ddf2..56b596d1288 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go @@ -51,7 +51,10 @@ import ( // perform this operation with a KMS key in a different Amazon Web Services // account, specify the key ARN or alias ARN in the value of the KeyId parameter. // Required permissions: kms:GetPublicKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) Related operations: CreateKey +// (key policy) Related operations: CreateKey Eventual consistency: The KMS API +// follows an eventual consistency model. For more information, see KMS eventual +// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . 
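Editor's note, not part of the vendored diff: GetParametersForImport, updated above, is the first half of the key-material import flow and ImportKeyMaterial is the second. The sketch below assumes the actual wrapping of the key material happens offline (represented by a caller-supplied wrapOffline callback) and picks RSAES_OAEP_SHA_256 with an RSA_4096 wrapping key and a one-year expiration purely for illustration.

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// importMaterial fetches a wrapping public key and import token, then uploads
// key material that was wrapped offline with that public key.
func importMaterial(ctx context.Context, client *kms.Client, keyID string, wrapOffline func(publicKey []byte) []byte) error {
	params, err := client.GetParametersForImport(ctx, &kms.GetParametersForImportInput{
		KeyId:             aws.String(keyID),
		WrappingAlgorithm: types.AlgorithmSpecRsaesOaepSha256,
		WrappingKeySpec:   types.WrappingKeySpecRsa4096,
	})
	if err != nil {
		return err
	}
	// Encrypt ("wrap") the key material with the returned public key, outside KMS.
	wrapped := wrapOffline(params.PublicKey)

	_, err = client.ImportKeyMaterial(ctx, &kms.ImportKeyMaterialInput{
		KeyId:                aws.String(keyID),
		ImportToken:          params.ImportToken,
		EncryptedKeyMaterial: wrapped,
		ExpirationModel:      types.ExpirationModelTypeKeyMaterialExpires,
		ValidTo:              aws.Time(time.Now().AddDate(1, 0, 0)),
	})
	return err
}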
func (c *Client) GetPublicKey(ctx context.Context, params *GetPublicKeyInput, optFns ...func(*Options)) (*GetPublicKeyOutput, error) { if params == nil { params = &GetPublicKeyInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go index 7172c3c7e72..052fca8801c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go @@ -86,6 +86,10 @@ import ( // (key policy) Related operations: // - DeleteImportedKeyMaterial // - GetParametersForImport +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) ImportKeyMaterial(ctx context.Context, params *ImportKeyMaterialInput, optFns ...func(*Options)) (*ImportKeyMaterialOutput, error) { if params == nil { params = &ImportKeyMaterialInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go index 944e9bb93ba..6462e7284e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go @@ -32,6 +32,10 @@ import ( // - CreateAlias // - DeleteAlias // - UpdateAlias +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) ListAliases(ctx context.Context, params *ListAliasesInput, optFns ...func(*Options)) (*ListAliasesOutput, error) { if params == nil { params = &ListAliasesInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go index a7e07190526..9c640211011 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go @@ -31,6 +31,10 @@ import ( // - ListRetirableGrants // - RetireGrant // - RevokeGrant +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) ListGrants(ctx context.Context, params *ListGrantsInput, optFns ...func(*Options)) (*ListGrantsOutput, error) { if params == nil { params = &ListGrantsInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go index e207b236d07..37fa57df987 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go @@ -18,7 +18,11 @@ import ( // Services account. 
Required permissions: kms:ListKeyPolicies (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: // - GetKeyPolicy -// - PutKeyPolicy +// - PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html) +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) ListKeyPolicies(ctx context.Context, params *ListKeyPoliciesInput, optFns ...func(*Options)) (*ListKeyPoliciesOutput, error) { if params == nil { params = &ListKeyPoliciesInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go index b7328b9a806..0aeeb2982f3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go @@ -20,6 +20,10 @@ import ( // - DescribeKey // - ListAliases // - ListResourceTags +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) ListKeys(ctx context.Context, params *ListKeysInput, optFns ...func(*Options)) (*ListKeysOutput, error) { if params == nil { params = &ListKeysInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go index b99781652e5..f5730845680 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go @@ -24,6 +24,10 @@ import ( // - ReplicateKey // - TagResource // - UntagResource +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) ListResourceTags(ctx context.Context, params *ListResourceTagsInput, optFns ...func(*Options)) (*ListResourceTagsOutput, error) { if params == nil { params = &ListResourceTagsInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go index 193892a9ae5..ec8f29ca9ed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go @@ -22,15 +22,27 @@ import ( // in the Key Management Service Developer Guide . For examples of working with // grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html) // . Cross-account use: You must specify a principal in your Amazon Web Services -// account. However, this operation can return grants in any Amazon Web Services -// account. You do not need kms:ListRetirableGrants permission (or any other -// additional permission) in any Amazon Web Services account other than your own. 
-// Required permissions: kms:ListRetirableGrants (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) in your Amazon Web Services account. Related operations: +// account. This operation returns a list of grants where the retiring principal +// specified in the ListRetirableGrants request is the same retiring principal on +// the grant. This can include grants on KMS keys owned by other Amazon Web +// Services accounts, but you do not need kms:ListRetirableGrants permission (or +// any other additional permission) in any Amazon Web Services account other than +// your own. Required permissions: kms:ListRetirableGrants (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy) in your Amazon Web Services account. KMS authorizes +// ListRetirableGrants requests by evaluating the caller account's +// kms:ListRetirableGrants permissions. The authorized resource in +// ListRetirableGrants calls is the retiring principal specified in the request. +// KMS does not evaluate the caller's permissions to verify their access to any KMS +// keys or grants that might be returned by the ListRetirableGrants call. Related +// operations: // - CreateGrant // - ListGrants // - RetireGrant // - RevokeGrant +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) ListRetirableGrants(ctx context.Context, params *ListRetirableGrantsInput, optFns ...func(*Options)) (*ListRetirableGrantsOutput, error) { if params == nil { params = &ListRetirableGrantsInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go index 2cc0f596c26..e7a9e8c61d5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go @@ -20,7 +20,10 @@ import ( // in the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. // Required permissions: kms:PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) Related operations: GetKeyPolicy +// (key policy) Related operations: GetKeyPolicy Eventual consistency: The KMS API +// follows an eventual consistency model. For more information, see KMS eventual +// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) PutKeyPolicy(ctx context.Context, params *PutKeyPolicyInput, optFns ...func(*Options)) (*PutKeyPolicyOutput, error) { if params == nil { params = &PutKeyPolicyInput{} @@ -88,7 +91,8 @@ type PutKeyPolicyInput struct { // information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key) // in the Key Management Service Developer Guide. Use this parameter only when you // intend to prevent the principal that is making the request from making a - // subsequent PutKeyPolicy request on the KMS key. + // subsequent PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html) + // request on the KMS key. 
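Editor's note, not part of the vendored diff: the PutKeyPolicy doc changes above link the API reference for the lockout warning. A minimal sketch of replacing the default key policy while leaving the lockout safety check enabled; the helper name and the idea of a caller-supplied policy document are assumptions.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// setKeyPolicy replaces the key policy named "default". Leaving the safety
// check at false means a policy that would lock the caller out of future
// PutKeyPolicy calls is rejected instead of applied.
func setKeyPolicy(ctx context.Context, client *kms.Client, keyID, policyJSON string) error {
	_, err := client.PutKeyPolicy(ctx, &kms.PutKeyPolicyInput{
		KeyId:                          aws.String(keyID),
		PolicyName:                     aws.String("default"),
		Policy:                         aws.String(policyJSON),
		BypassPolicyLockoutSafetyCheck: false,
	})
	return err
}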
BypassPolicyLockoutSafetyCheck bool noSmithyDocumentSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go index 95b3c2b2656..7c7155e2957 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go @@ -78,6 +78,10 @@ import ( // - Encrypt // - GenerateDataKey // - GenerateDataKeyPair +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) ReEncrypt(ctx context.Context, params *ReEncryptInput, optFns ...func(*Options)) (*ReEncryptOutput, error) { if params == nil { params = &ReEncryptInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go index 625b701df93..e270fe10e6a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go @@ -59,10 +59,10 @@ import ( // operation in the replica key's Region. If you replicate a multi-Region primary // key with imported key material, the replica key is created with no key material. // You must import the same key material that you imported into the primary key. -// For details, see Importing key material into multi-Region keys in the Key -// Management Service Developer Guide. To convert a replica key to a primary key, -// use the UpdatePrimaryRegion operation. ReplicateKey uses different default -// values for the KeyPolicy and Tags parameters than those used in the KMS +// For details, see Importing key material into multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html) +// in the Key Management Service Developer Guide. To convert a replica key to a +// primary key, use the UpdatePrimaryRegion operation. ReplicateKey uses different +// default values for the KeyPolicy and Tags parameters than those used in the KMS // console. For details, see the parameter descriptions. Cross-account use: No. You // cannot use this operation to create a replica key in a different Amazon Web // Services account. Required permissions: @@ -75,6 +75,10 @@ import ( // Related operations // - CreateKey // - UpdatePrimaryRegion +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) ReplicateKey(ctx context.Context, params *ReplicateKeyInput, optFns ...func(*Options)) (*ReplicateKeyOutput, error) { if params == nil { params = &ReplicateKeyInput{} @@ -132,7 +136,8 @@ type ReplicateKeyInput struct { // information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key) // in the Key Management Service Developer Guide. Use this parameter only when you // intend to prevent the principal that is making the request from making a - // subsequent PutKeyPolicy request on the KMS key. + // subsequent PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html) + // request on the KMS key. 
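Editor's note, not part of the vendored diff: ReplicateKey, documented above, creates a replica of a multi-Region primary key in another Region. A hedged sketch; the client is assumed to be configured for the primary key's Region, and the helper name is illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// replicate creates a replica of the multi-Region primary key in replicaRegion.
func replicate(ctx context.Context, client *kms.Client, primaryKeyARN, replicaRegion string) error {
	out, err := client.ReplicateKey(ctx, &kms.ReplicateKeyInput{
		KeyId:         aws.String(primaryKeyARN),
		ReplicaRegion: aws.String(replicaRegion),
	})
	if err != nil {
		return err
	}
	fmt.Println("replica ARN:", aws.ToString(out.ReplicaKeyMetadata.Arn))
	return nil
}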
BypassPolicyLockoutSafetyCheck bool // A description of the KMS key. The default value is an empty string (no diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go index 1b82be01d01..585c4e2e328 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go @@ -24,7 +24,7 @@ import ( // in the Key Management Service Developer Guide . For examples of working with // grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html) // . Cross-account use: Yes. You can retire a grant on a KMS key in a different -// Amazon Web Services account. Required permissions::Permission to retire a grant +// Amazon Web Services account. Required permissions: Permission to retire a grant // is determined primarily by the grant. For details, see Retiring and revoking // grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) // in the Key Management Service Developer Guide. Related operations: @@ -32,6 +32,10 @@ import ( // - ListGrants // - ListRetirableGrants // - RevokeGrant +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) RetireGrant(ctx context.Context, params *RetireGrantInput, optFns ...func(*Options)) (*RetireGrantOutput, error) { if params == nil { params = &RetireGrantInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go index 8c7bfbadc6c..2c325ed3131 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go @@ -29,6 +29,10 @@ import ( // - ListGrants // - ListRetirableGrants // - RetireGrant +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) RevokeGrant(ctx context.Context, params *RevokeGrantInput, optFns ...func(*Options)) (*RevokeGrantOutput, error) { if params == nil { params = &RevokeGrantInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go index 2f60f67c470..5ca0bb1e4cc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go @@ -23,17 +23,17 @@ import ( // associated with it, including all aliases that refer to it. Deleting a KMS key // is a destructive and potentially dangerous operation. When a KMS key is deleted, // all data that was encrypted under the KMS key is unrecoverable. (The only -// exception is a multi-Region replica key , or an asymmetric or HMAC KMS key with -// imported key material .) To prevent the use of a KMS key without deleting it, -// use DisableKey . You can schedule the deletion of a multi-Region primary key and -// its replica keys at any time. 
However, KMS will not delete a multi-Region -// primary key with existing replica keys. If you schedule the deletion of a -// primary key with replicas, its key state changes to PendingReplicaDeletion and -// it cannot be replicated or used in cryptographic operations. This status can -// continue indefinitely. When the last of its replicas keys is deleted (not just -// scheduled), the key state of the primary key changes to PendingDeletion and its -// waiting period ( PendingWindowInDays ) begins. For details, see Deleting -// multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html) +// exception is a multi-Region replica key (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html) +// , or an asymmetric or HMAC KMS key with imported key material .) To prevent the +// use of a KMS key without deleting it, use DisableKey . You can schedule the +// deletion of a multi-Region primary key and its replica keys at any time. +// However, KMS will not delete a multi-Region primary key with existing replica +// keys. If you schedule the deletion of a primary key with replicas, its key state +// changes to PendingReplicaDeletion and it cannot be replicated or used in +// cryptographic operations. This status can continue indefinitely. When the last +// of its replicas keys is deleted (not just scheduled), the key state of the +// primary key changes to PendingDeletion and its waiting period ( +// PendingWindowInDays ) begins. For details, see Deleting multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html) // in the Key Management Service Developer Guide. When KMS deletes a KMS key from // an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/delete-cmk-keystore.html) // , it makes a best effort to delete the associated key material from the @@ -54,6 +54,10 @@ import ( // Required permissions: kms:ScheduleKeyDeletion (key policy) Related operations // - CancelKeyDeletion // - DisableKey +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) ScheduleKeyDeletion(ctx context.Context, params *ScheduleKeyDeletionInput, optFns ...func(*Options)) (*ScheduleKeyDeletionOutput, error) { if params == nil { params = &ScheduleKeyDeletionInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go index bcf3adc4eaf..da47a82a15c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go @@ -50,7 +50,10 @@ import ( // perform this operation with a KMS key in a different Amazon Web Services // account, specify the key ARN or alias ARN in the value of the KeyId parameter. // Required permissions: kms:Sign (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) Related operations: Verify +// (key policy) Related operations: Verify Eventual consistency: The KMS API +// follows an eventual consistency model. For more information, see KMS eventual +// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . 
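Editor's note, not part of the vendored diff: the ScheduleKeyDeletion hunk above reiterates that deletion is destructive and gated by a waiting period. A minimal sketch of scheduling deletion with a 30-day window, which can still be reverted with CancelKeyDeletion before the returned deletion date; the helper name is illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// scheduleDeletion schedules a KMS key for deletion after a 30-day waiting period.
func scheduleDeletion(ctx context.Context, client *kms.Client, keyID string) error {
	out, err := client.ScheduleKeyDeletion(ctx, &kms.ScheduleKeyDeletionInput{
		KeyId:               aws.String(keyID),
		PendingWindowInDays: aws.Int32(30),
	})
	if err != nil {
		return err
	}
	fmt.Println("key state:", out.KeyState, "deletion date:", aws.ToTime(out.DeletionDate))
	return nil
}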
func (c *Client) Sign(ctx context.Context, params *SignInput, optFns ...func(*Options)) (*SignOutput, error) { if params == nil { params = &SignInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go index f4feb4702a7..427ffce8ab0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go @@ -40,6 +40,10 @@ import ( // - ListResourceTags // - ReplicateKey // - UntagResource +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { if params == nil { params = &TagResourceInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go index 9e5bd75fb48..0f46f9c65b3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go @@ -33,6 +33,10 @@ import ( // - ListResourceTags // - ReplicateKey // - TagResource +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { if params == nil { params = &UntagResourceInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go index 1cff25022fc..fb19ed67515 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go @@ -44,6 +44,10 @@ import ( // - CreateAlias // - DeleteAlias // - ListAliases +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) UpdateAlias(ctx context.Context, params *UpdateAliasInput, optFns ...func(*Options)) (*UpdateAliasOutput, error) { if params == nil { params = &UpdateAliasInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go index 047a795c8b3..b9449c1c9b5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go @@ -70,6 +70,10 @@ import ( // - DeleteCustomKeyStore // - DescribeCustomKeyStores // - DisconnectCustomKeyStore +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . 
func (c *Client) UpdateCustomKeyStore(ctx context.Context, params *UpdateCustomKeyStoreInput, optFns ...func(*Options)) (*UpdateCustomKeyStoreOutput, error) { if params == nil { params = &UpdateCustomKeyStoreInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go index dd72d4208d6..60709c79cd3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go @@ -20,6 +20,10 @@ import ( // (key policy) Related operations // - CreateKey // - DescribeKey +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) UpdateKeyDescription(ctx context.Context, params *UpdateKeyDescriptionInput, optFns ...func(*Options)) (*UpdateKeyDescriptionOutput, error) { if params == nil { params = &UpdateKeyDescriptionInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go index af4ea44820d..dd4e0418808 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go @@ -62,6 +62,10 @@ import ( // Related operations // - CreateKey // - ReplicateKey +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) UpdatePrimaryRegion(ctx context.Context, params *UpdatePrimaryRegionInput, optFns ...func(*Options)) (*UpdatePrimaryRegionOutput, error) { if params == nil { params = &UpdatePrimaryRegionInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go index ffa81fdfef3..bab651f03ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go @@ -43,7 +43,10 @@ import ( // perform this operation with a KMS key in a different Amazon Web Services // account, specify the key ARN or alias ARN in the value of the KeyId parameter. // Required permissions: kms:Verify (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) Related operations: Sign +// (key policy) Related operations: Sign Eventual consistency: The KMS API follows +// an eventual consistency model. For more information, see KMS eventual +// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . 
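Editor's note, not part of the vendored diff: Sign and Verify, both of which gain the eventual-consistency note, are meant to be used as a pair with an asymmetric signing key. A hedged sketch using a raw message and ECDSA_SHA_256; the algorithm must match the key spec of the signing key, and the helper name is illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// signAndVerify signs a short raw message with an asymmetric signing key and
// then verifies the signature with the same key.
func signAndVerify(ctx context.Context, client *kms.Client, signingKeyID string, message []byte) error {
	sig, err := client.Sign(ctx, &kms.SignInput{
		KeyId:            aws.String(signingKeyID),
		Message:          message,
		MessageType:      types.MessageTypeRaw,
		SigningAlgorithm: types.SigningAlgorithmSpecEcdsaSha256,
	})
	if err != nil {
		return err
	}
	ver, err := client.Verify(ctx, &kms.VerifyInput{
		KeyId:            aws.String(signingKeyID),
		Message:          message,
		MessageType:      types.MessageTypeRaw,
		Signature:        sig.Signature,
		SigningAlgorithm: types.SigningAlgorithmSpecEcdsaSha256,
	})
	if err != nil {
		return err
	}
	fmt.Println("signature valid:", ver.SignatureValid)
	return nil
}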
func (c *Client) Verify(ctx context.Context, params *VerifyInput, optFns ...func(*Options)) (*VerifyOutput, error) { if params == nil { params = &VerifyInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go index 609421dc862..3fcc79ea087 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go @@ -30,7 +30,10 @@ import ( // this operation with a KMS key in a different Amazon Web Services account, // specify the key ARN or alias ARN in the value of the KeyId parameter. Required // permissions: kms:VerifyMac (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) Related operations: GenerateMac +// (key policy) Related operations: GenerateMac Eventual consistency: The KMS API +// follows an eventual consistency model. For more information, see KMS eventual +// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html) +// . func (c *Client) VerifyMac(ctx context.Context, params *VerifyMacInput, optFns ...func(*Options)) (*VerifyMacOutput, error) { if params == nil { params = &VerifyMacInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go index dafb70143a2..16619a43751 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go @@ -52,6 +52,34 @@ func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) } +type withAnonymous struct { + resolver AuthSchemeResolver +} + +var _ AuthSchemeResolver = (*withAnonymous)(nil) + +func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + opts, err := v.resolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + opts = append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }) + return opts, nil +} + +func wrapWithAnonymousAuth(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &withAnonymous{ + resolver: options.AuthSchemeResolver, + } +} + // AuthResolverParameters contains the set of inputs necessary for auth scheme // resolution. 
type AuthResolverParameters struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go index 766885cbb8c..0538b4ef969 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go @@ -366,7 +366,7 @@ func (r *resolver) ResolveEndpoint( } } if _UseFIPS == true { - if true == _PartitionResult.SupportsFIPS { + if _PartitionResult.SupportsFIPS == true { uriString := func() string { var out strings.Builder out.WriteString("https://kms-fips.") diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go index b1e237e444a..68214996641 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go @@ -3,4 +3,4 @@ package kms // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.27.2" +const goModuleVersion = "1.27.6" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go index b4057f7eb59..b24e2565947 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go @@ -70,11 +70,9 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. If specified in an operation call's functional - // options with a value that is different than the constructed client's Options, - // the Client's Retryer will be wrapped to use the operation's specific + // per operation call's retry max attempts. If specified in an operation call's + // functional options with a value that is different than the constructed client's + // Options, the Client's Retryer will be wrapped to use the operation's specific // RetryMaxAttempts value. RetryMaxAttempts int diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go index dec2d3ecbe0..0bc4b4318e9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go @@ -1344,8 +1344,8 @@ func (e *XksProxyVpcEndpointServiceInUseException) ErrorFault() smithy.ErrorFaul // The request was rejected because the Amazon VPC endpoint service configuration // does not fulfill the requirements for an external key store proxy. For details, -// see the exception message and review the requirements for Amazon VPC endpoint -// service connectivity for an external key store. +// see the exception message and review the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements) +// for Amazon VPC endpoint service connectivity for an external key store. 
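Editor's note, not part of the vendored diff: the endpoints.go tweak above only reorders the FIPS comparison, but it is a reminder that selection of the kms-fips endpoints is driven by resolver options. A sketch of enabling FIPS endpoints from shared config; the region choice is illustrative.

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()
	// Ask the SDK to resolve FIPS endpoints; for KMS this selects the
	// kms-fips.<region> hosts in partitions that support FIPS.
	cfg, err := config.LoadDefaultConfig(ctx,
		config.WithRegion("us-east-1"),
		config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
	)
	if err != nil {
		panic(err)
	}
	client := kms.NewFromConfig(cfg)
	_ = client // use the client as usual; requests go to the FIPS endpoint
}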
type XksProxyVpcEndpointServiceInvalidConfigurationException struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 73621142dc0..7a4c30c59c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.18.5 (2023-12-08) + +* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. + +# v1.18.4 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2023-12-06) + +* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. + # v1.18.2 (2023-12-01) * **Bug Fix**: Correct wrapping of errors in authentication workflow. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go index 879dc35a11d..e439699253b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -42,6 +42,8 @@ func New(options Options, optFns ...func(*Options)) *Client { setResolvedDefaultsMode(&options) + resolveRetryer(&options) + resolveHTTPClient(&options) resolveHTTPSignerV4(&options) @@ -54,10 +56,12 @@ func New(options Options, optFns ...func(*Options)) *Client { fn(&options) } - resolveRetryer(&options) + finalizeRetryMaxAttempts(&options) ignoreAnonymousAuth(&options) + wrapWithAnonymousAuth(&options) + resolveAuthSchemes(&options) client := &Client{ @@ -85,7 +89,7 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf fn(&options) } - finalizeRetryMaxAttemptOptions(&options, *c) + finalizeOperationRetryMaxAttempts(&options, *c) finalizeClientEndpointResolverOptions(&options) @@ -333,7 +337,15 @@ func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { o.RetryMaxAttempts = cfg.RetryMaxAttempts } -func finalizeRetryMaxAttemptOptions(o *Options, client Client) { +func finalizeRetryMaxAttempts(o *Options) { + if o.RetryMaxAttempts == 0 { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func finalizeOperationRetryMaxAttempts(o *Options, client Client) { if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { return } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go index f7702dd0d3b..3b28e825dd0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go @@ -52,6 +52,34 @@ func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) } +type withAnonymous struct { + resolver AuthSchemeResolver +} + +var _ AuthSchemeResolver = (*withAnonymous)(nil) + +func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + opts, err := v.resolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + opts = append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }) + return opts, nil +} + +func wrapWithAnonymousAuth(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + 
options.AuthSchemeResolver = &withAnonymous{ + resolver: options.AuthSchemeResolver, + } +} + // AuthResolverParameters contains the set of inputs necessary for auth scheme // resolution. type AuthResolverParameters struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index 7322f306ea5..52495f1fb60 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.2" +const goModuleVersion = "1.18.5" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go index ddc5c0df369..5dee7e53f47 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go @@ -70,11 +70,9 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. If specified in an operation call's functional - // options with a value that is different than the constructed client's Options, - // the Client's Retryer will be wrapped to use the operation's specific + // per operation call's retry max attempts. If specified in an operation call's + // functional options with a value that is different than the constructed client's + // Options, the Client's Retryer will be wrapped to use the operation's specific // RetryMaxAttempts value. RetryMaxAttempts int diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 446559ca96d..80df3bdde8b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.21.5 (2023-12-08) + +* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. + +# v1.21.4 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.3 (2023-12-06) + +* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. + # v1.21.2 (2023-12-01) * **Bug Fix**: Correct wrapping of errors in authentication workflow. 
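The retryer changes in these client diffs reorder construction so that `resolveRetryer` runs before the caller's functional options and `finalizeRetryMaxAttempts` runs after them, which is what the "Reinstate presence of default Retryer in functional options, but still respect max attempts set therein" changelog entries describe. A minimal sketch of what that means for a caller, assuming the usual `config`/`sso` setup; the five-attempt cap is an arbitrary example value:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	client := sso.NewFromConfig(cfg, func(o *sso.Options) {
		// resolveRetryer now runs before these functional options, so the
		// default Retryer is already present here and can be inspected or
		// wrapped by the caller.
		if o.Retryer == nil {
			log.Println("no default retryer resolved")
		}
		// finalizeRetryMaxAttempts runs after the options, so this cap is
		// still applied on top of whatever Retryer is present.
		o.RetryMaxAttempts = 5 // arbitrary example value
	})
	_ = client
}
```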
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go index 59239741a91..fed0897390b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -42,6 +42,8 @@ func New(options Options, optFns ...func(*Options)) *Client { setResolvedDefaultsMode(&options) + resolveRetryer(&options) + resolveHTTPClient(&options) resolveHTTPSignerV4(&options) @@ -54,10 +56,12 @@ func New(options Options, optFns ...func(*Options)) *Client { fn(&options) } - resolveRetryer(&options) + finalizeRetryMaxAttempts(&options) ignoreAnonymousAuth(&options) + wrapWithAnonymousAuth(&options) + resolveAuthSchemes(&options) client := &Client{ @@ -85,7 +89,7 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf fn(&options) } - finalizeRetryMaxAttemptOptions(&options, *c) + finalizeOperationRetryMaxAttempts(&options, *c) finalizeClientEndpointResolverOptions(&options) @@ -333,7 +337,15 @@ func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { o.RetryMaxAttempts = cfg.RetryMaxAttempts } -func finalizeRetryMaxAttemptOptions(o *Options, client Client) { +func finalizeRetryMaxAttempts(o *Options) { + if o.RetryMaxAttempts == 0 { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func finalizeOperationRetryMaxAttempts(o *Options, client Client) { if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { return } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go index fe88367c4c2..40b3becb9f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go @@ -52,6 +52,34 @@ func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) } +type withAnonymous struct { + resolver AuthSchemeResolver +} + +var _ AuthSchemeResolver = (*withAnonymous)(nil) + +func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + opts, err := v.resolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + opts = append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }) + return opts, nil +} + +func wrapWithAnonymousAuth(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &withAnonymous{ + resolver: options.AuthSchemeResolver, + } +} + // AuthResolverParameters contains the set of inputs necessary for auth scheme // resolution. 
type AuthResolverParameters struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index ee0a973640f..98eaaa6d813 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.21.2" +const goModuleVersion = "1.21.5" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go index f338f7d6857..b964e7e1090 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go @@ -70,11 +70,9 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. If specified in an operation call's functional - // options with a value that is different than the constructed client's Options, - // the Client's Retryer will be wrapped to use the operation's specific + // per operation call's retry max attempts. If specified in an operation call's + // functional options with a value that is different than the constructed client's + // Options, the Client's Retryer will be wrapped to use the operation's specific // RetryMaxAttempts value. RetryMaxAttempts int diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 0704f0a5de8..17dd41f3593 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,16 @@ +# v1.26.5 (2023-12-08) + +* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. + +# v1.26.4 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.3 (2023-12-06) + +* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. +* **Bug Fix**: STS `AssumeRoleWithSAML` and `AssumeRoleWithWebIdentity` would incorrectly attempt to use SigV4 authentication. + # v1.26.2 (2023-12-01) * **Bug Fix**: Correct wrapping of errors in authentication workflow. 
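The sts `v1.26.3` entry above, together with the `operationAuthOptions` change further below, drops the SigV4 scheme for `AssumeRoleWithSAML` and `AssumeRoleWithWebIdentity`, leaving only anonymous auth for those two operations. A hedged sketch of the calling pattern this supports, with placeholder role ARN, session name, and token:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	// No credentials provider is configured: the operation resolves to the
	// anonymous auth scheme and the request is sent unsigned.
	client := sts.New(sts.Options{Region: "us-east-1"})

	out, err := client.AssumeRoleWithWebIdentity(context.TODO(), &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName:  aws.String("example-session"),                        // placeholder
		WebIdentityToken: aws.String("<oidc-token>"),                           // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("temporary access key:", aws.ToString(out.Credentials.AccessKeyId))
}
```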
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index 32c391e56e8..59cc4c70a38 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -46,6 +46,8 @@ func New(options Options, optFns ...func(*Options)) *Client { setResolvedDefaultsMode(&options) + resolveRetryer(&options) + resolveHTTPClient(&options) resolveHTTPSignerV4(&options) @@ -58,10 +60,12 @@ func New(options Options, optFns ...func(*Options)) *Client { fn(&options) } - resolveRetryer(&options) + finalizeRetryMaxAttempts(&options) ignoreAnonymousAuth(&options) + wrapWithAnonymousAuth(&options) + resolveAuthSchemes(&options) client := &Client{ @@ -89,7 +93,7 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf fn(&options) } - finalizeRetryMaxAttemptOptions(&options, *c) + finalizeOperationRetryMaxAttempts(&options, *c) finalizeClientEndpointResolverOptions(&options) @@ -337,7 +341,15 @@ func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { o.RetryMaxAttempts = cfg.RetryMaxAttempts } -func finalizeRetryMaxAttemptOptions(o *Options, client Client) { +func finalizeRetryMaxAttempts(o *Options) { + if o.RetryMaxAttempts == 0 { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func finalizeOperationRetryMaxAttempts(o *Options, client Client) { if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { return } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go index 07d4dc55c59..9db5bfd4348 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go @@ -52,6 +52,34 @@ func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) } +type withAnonymous struct { + resolver AuthSchemeResolver +} + +var _ AuthSchemeResolver = (*withAnonymous)(nil) + +func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + opts, err := v.resolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + opts = append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }) + return opts, nil +} + +func wrapWithAnonymousAuth(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &withAnonymous{ + resolver: options.AuthSchemeResolver, + } +} + // AuthResolverParameters contains the set of inputs necessary for auth scheme // resolution. 
type AuthResolverParameters struct { @@ -92,34 +120,12 @@ func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ "AssumeRoleWithSAML": func(params *AuthResolverParameters) []*smithyauth.Option { return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "sts") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - - return props - }(), - }, - {SchemeID: smithyauth.SchemeIDAnonymous}, } }, "AssumeRoleWithWebIdentity": func(params *AuthResolverParameters) []*smithyauth.Option { return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "sts") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - - return props - }(), - }, - {SchemeID: smithyauth.SchemeIDAnonymous}, } }, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 7fd1c74bd6a..61667eb2c2a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.26.2" +const goModuleVersion = "1.26.5" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go index 2c7a03701c8..5c1be79f8c0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go @@ -70,11 +70,9 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. If specified in an operation call's functional - // options with a value that is different than the constructed client's Options, - // the Client's Retryer will be wrapped to use the operation's specific + // per operation call's retry max attempts. If specified in an operation call's + // functional options with a value that is different than the constructed client's + // Options, the Client's Retryer will be wrapped to use the operation's specific // RetryMaxAttempts value. RetryMaxAttempts int diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 8c83df00eb3..46b115083d1 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,9 @@ +# Release (2023-12-07) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.19.0 + * **Feature**: Support modeled request compression. + # Release (2023-11-30) * No change notes available for this release. 
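The smithy-go v1.19.0 request-compression feature noted above is implemented by the new `private/requestcompression` package vendored below. As a rough illustration only (the package documents itself as private to the smithy client runtime, and service clients normally register it from generated code rather than directly), the middleware could be attached to a stack as sketched here; the 10 KiB threshold is an arbitrary example:

```go
package main

import (
	"log"

	"github.com/aws/smithy-go/middleware"
	"github.com/aws/smithy-go/private/requestcompression"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	stack := middleware.NewStack("example", smithyhttp.NewStackRequest)

	// Register the Serialize-phase middleware: request streams of at least
	// 10 KiB are gzip-compressed and Content-Encoding is set accordingly.
	if err := requestcompression.AddRequestCompression(stack, false, 10*1024, []string{"gzip"}); err != nil {
		log.Fatal(err)
	}
	_ = stack
}
```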
diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index bb04fc9e8fe..cd6f7fa45c4 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.1" +const goModuleVersion = "1.19.0" diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go b/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go new file mode 100644 index 00000000000..004d78f2136 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go @@ -0,0 +1,30 @@ +package requestcompression + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" +) + +func gzipCompress(input io.Reader) ([]byte, error) { + var b bytes.Buffer + w, err := gzip.NewWriterLevel(&b, gzip.DefaultCompression) + if err != nil { + return nil, fmt.Errorf("failed to create gzip writer, %v", err) + } + + inBytes, err := io.ReadAll(input) + if err != nil { + return nil, fmt.Errorf("failed read payload to compress, %v", err) + } + + if _, err = w.Write(inBytes); err != nil { + return nil, fmt.Errorf("failed to write payload to be compressed, %v", err) + } + if err = w.Close(); err != nil { + return nil, fmt.Errorf("failed to flush payload being compressed, %v", err) + } + + return b.Bytes(), nil +} diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go new file mode 100644 index 00000000000..06c16afc113 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go @@ -0,0 +1,52 @@ +package requestcompression + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "net/http" +) + +const captureUncompressedRequestID = "CaptureUncompressedRequest" + +// AddCaptureUncompressedRequestMiddleware captures http request before compress encoding for check +func AddCaptureUncompressedRequestMiddleware(stack *middleware.Stack, buf *bytes.Buffer) error { + return stack.Serialize.Insert(&captureUncompressedRequestMiddleware{ + buf: buf, + }, "RequestCompression", middleware.Before) +} + +type captureUncompressedRequestMiddleware struct { + req *http.Request + buf *bytes.Buffer + bytes []byte +} + +// ID returns id of the captureUncompressedRequestMiddleware +func (*captureUncompressedRequestMiddleware) ID() string { + return captureUncompressedRequestID +} + +// HandleSerialize captures request payload before it is compressed by request compression middleware +func (m *captureUncompressedRequestMiddleware) HandleSerialize(ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, +) ( + output middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := input.Request.(*smithyhttp.Request) + if !ok { + return output, metadata, fmt.Errorf("error when retrieving http request") + } + + _, err = io.Copy(m.buf, request.GetStream()) + if err != nil { + return output, metadata, fmt.Errorf("error when copying http request stream: %q", err) + } + if err = request.RewindStream(); err != nil { + return output, metadata, fmt.Errorf("error when rewinding request stream: %q", err) + } + + return 
next.HandleSerialize(ctx, input) +} diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go new file mode 100644 index 00000000000..7c41476039d --- /dev/null +++ b/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go @@ -0,0 +1,103 @@ +// Package requestcompression implements runtime support for smithy-modeled +// request compression. +// +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. +package requestcompression + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/transport/http" + "io" +) + +const MaxRequestMinCompressSizeBytes = 10485760 + +// Enumeration values for supported compress Algorithms. +const ( + GZIP = "gzip" +) + +type compressFunc func(io.Reader) ([]byte, error) + +var allowedAlgorithms = map[string]compressFunc{ + GZIP: gzipCompress, +} + +// AddRequestCompression add requestCompression middleware to op stack +func AddRequestCompression(stack *middleware.Stack, disabled bool, minBytes int64, algorithms []string) error { + return stack.Serialize.Add(&requestCompression{ + disableRequestCompression: disabled, + requestMinCompressSizeBytes: minBytes, + compressAlgorithms: algorithms, + }, middleware.After) +} + +type requestCompression struct { + disableRequestCompression bool + requestMinCompressSizeBytes int64 + compressAlgorithms []string +} + +// ID returns the ID of the middleware +func (m requestCompression) ID() string { + return "RequestCompression" +} + +// HandleSerialize gzip compress the request's stream/body if enabled by config fields +func (m requestCompression) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if m.disableRequestCompression { + return next.HandleSerialize(ctx, in) + } + // still need to check requestMinCompressSizeBytes in case it is out of range after service client config + if m.requestMinCompressSizeBytes < 0 || m.requestMinCompressSizeBytes > MaxRequestMinCompressSizeBytes { + return out, metadata, fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", m.requestMinCompressSizeBytes) + } + + req, ok := in.Request.(*http.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + for _, algorithm := range m.compressAlgorithms { + compressFunc := allowedAlgorithms[algorithm] + if compressFunc != nil { + if stream := req.GetStream(); stream != nil { + size, found, err := req.StreamLength() + if err != nil { + return out, metadata, fmt.Errorf("error while finding request stream length, %v", err) + } else if !found || size < m.requestMinCompressSizeBytes { + return next.HandleSerialize(ctx, in) + } + + compressedBytes, err := compressFunc(stream) + if err != nil { + return out, metadata, fmt.Errorf("failed to compress request stream, %v", err) + } + + var newReq *http.Request + if newReq, err = req.SetStream(bytes.NewReader(compressedBytes)); err != nil { + return out, metadata, fmt.Errorf("failed to set request stream, %v", err) + } + *req = *newReq + + if val := req.Header.Get("Content-Encoding"); val != "" { + req.Header.Set("Content-Encoding", 
fmt.Sprintf("%s, %s", val, algorithm)) + } else { + req.Header.Set("Content-Encoding", algorithm) + } + } + break + } + } + + return next.HandleSerialize(ctx, in) +} diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt new file mode 100644 index 00000000000..e028b46a9b0 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile new file mode 100644 index 00000000000..4e12afdd90d --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/Makefile @@ -0,0 +1,10 @@ +.PHONY: ci generate clean + +ci: clean generate + go test -race -v ./... + +generate: + go generate . + +clean: + rm -rf *_generated*.go diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md new file mode 100644 index 00000000000..cf6b42f3d77 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/README.md @@ -0,0 +1,95 @@ +# httpsnoop + +Package httpsnoop provides an easy way to capture http related metrics (i.e. +response time, bytes written, and http status code) from your application's +http.Handlers. + +Doing this requires non-trivial wrapping of the http.ResponseWriter interface, +which is also exposed for users interested in a more low-level API. + +[![Go Reference](https://pkg.go.dev/badge/github.com/felixge/httpsnoop.svg)](https://pkg.go.dev/github.com/felixge/httpsnoop) +[![Build Status](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml/badge.svg)](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml) + +## Usage Example + +```go +// myH is your app's http handler, perhaps a http.ServeMux or similar. +var myH http.Handler +// wrappedH wraps myH in order to log every request. +wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + m := httpsnoop.CaptureMetrics(myH, w, r) + log.Printf( + "%s %s (code=%d dt=%s written=%d)", + r.Method, + r.URL, + m.Code, + m.Duration, + m.Written, + ) +}) +http.ListenAndServe(":8080", wrappedH) +``` + +## Why this package exists + +Instrumenting an application's http.Handler is surprisingly difficult. + +However if you google for e.g. 
"capture ResponseWriter status code" you'll find +lots of advise and code examples that suggest it to be a fairly trivial +undertaking. Unfortunately everything I've seen so far has a high chance of +breaking your application. + +The main problem is that a `http.ResponseWriter` often implements additional +interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and +`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter` +in your own struct that also implements the `http.ResponseWriter` interface +will hide the additional interfaces mentioned above. This has a high change of +introducing subtle bugs into any non-trivial application. + +Another approach I've seen people take is to return a struct that implements +all of the interfaces above. However, that's also problematic, because it's +difficult to fake some of these interfaces behaviors when the underlying +`http.ResponseWriter` doesn't have an implementation. It's also dangerous, +because an application may choose to operate differently, merely because it +detects the presence of these additional interfaces. + +This package solves this problem by checking which additional interfaces a +`http.ResponseWriter` implements, returning a wrapped version implementing the +exact same set of interfaces. + +Additionally this package properly handles edge cases such as `WriteHeader` not +being called, or called more than once, as well as concurrent calls to +`http.ResponseWriter` methods, and even calls happening after the wrapped +`ServeHTTP` has already returned. + +Unfortunately this package is not perfect either. It's possible that it is +still missing some interfaces provided by the go core (let me know if you find +one), and it won't work for applications adding their own interfaces into the +mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying +`http.ResponseWriter` and type-assert the result to its other interfaces. + +However, hopefully the explanation above has sufficiently scared you of rolling +your own solution to this problem. httpsnoop may still break your application, +but at least it tries to avoid it as much as possible. + +Anyway, the real problem here is that smuggling additional interfaces inside +`http.ResponseWriter` is a problematic design choice, but it probably goes as +deep as the Go language specification itself. But that's okay, I still prefer +Go over the alternatives ;). + +## Performance + +``` +BenchmarkBaseline-8 20000 94912 ns/op +BenchmarkCaptureMetrics-8 20000 95461 ns/op +``` + +As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an +overhead of ~500 ns per http request on my machine. However, the margin of +error appears to be larger than that, therefor it should be reasonable to +assume that the overhead introduced by `CaptureMetrics` is absolutely +negligible. + +## License + +MIT diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go new file mode 100644 index 00000000000..bec7b71b39c --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go @@ -0,0 +1,86 @@ +package httpsnoop + +import ( + "io" + "net/http" + "time" +) + +// Metrics holds metrics captured from CaptureMetrics. +type Metrics struct { + // Code is the first http response code passed to the WriteHeader func of + // the ResponseWriter. If no such call is made, a default code of 200 is + // assumed instead. 
+ Code int + // Duration is the time it took to execute the handler. + Duration time.Duration + // Written is the number of bytes successfully written by the Write or + // ReadFrom function of the ResponseWriter. ResponseWriters may also write + // data to their underlaying connection directly (e.g. headers), but those + // are not tracked. Therefor the number of Written bytes will usually match + // the size of the response body. + Written int64 +} + +// CaptureMetrics wraps the given hnd, executes it with the given w and r, and +// returns the metrics it captured from it. +func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics { + return CaptureMetricsFn(w, func(ww http.ResponseWriter) { + hnd.ServeHTTP(ww, r) + }) +} + +// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the +// resulting metrics. This is very similar to CaptureMetrics (which is just +// sugar on top of this func), but is a more usable interface if your +// application doesn't use the Go http.Handler interface. +func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics { + m := Metrics{Code: http.StatusOK} + m.CaptureMetrics(w, fn) + return m +} + +// CaptureMetrics wraps w and calls fn with the wrapped w and updates +// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn, +// but allows one to customize starting Metrics object. +func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) { + var ( + start = time.Now() + headerWritten bool + hooks = Hooks{ + WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { + return func(code int) { + next(code) + + if !(code >= 100 && code <= 199) && !headerWritten { + m.Code = code + headerWritten = true + } + } + }, + + Write: func(next WriteFunc) WriteFunc { + return func(p []byte) (int, error) { + n, err := next(p) + + m.Written += int64(n) + headerWritten = true + return n, err + } + }, + + ReadFrom: func(next ReadFromFunc) ReadFromFunc { + return func(src io.Reader) (int64, error) { + n, err := next(src) + + headerWritten = true + m.Written += n + return n, err + } + }, + } + ) + + fn(Wrap(w, hooks)) + m.Duration += time.Since(start) +} diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go new file mode 100644 index 00000000000..203c35b3c6d --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/docs.go @@ -0,0 +1,10 @@ +// Package httpsnoop provides an easy way to capture http related metrics (i.e. +// response time, bytes written, and http status code) from your application's +// http.Handlers. +// +// Doing this requires non-trivial wrapping of the http.ResponseWriter +// interface, which is also exposed for users interested in a more low-level +// API. +package httpsnoop + +//go:generate go run codegen/main.go diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go new file mode 100644 index 00000000000..101cedde674 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go @@ -0,0 +1,436 @@ +// +build go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. 
+type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// PushFunc is part of the http.Pusher interface. +type PushFunc func(target string, opts *http.PushOptions) error + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc + Push func(PushFunc) PushFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// - http.Pusher +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + _, i4 := w.(http.Pusher) + switch { + // combination 1/32 + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/32 + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Pusher + }{rw, rw, rw} + // combination 3/32 + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 4/32 + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw} + // combination 5/32 + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 6/32 + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + http.Pusher + }{rw, rw, rw, rw} + // combination 7/32 + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 8/32 + case !i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 9/32 + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 10/32 + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw} + // combination 11/32 + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 12/32 + case !i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 13/32 + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 14/32 + case !i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 15/32 + case !i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 16/32 + case !i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 17/32 + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 18/32 + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Pusher + }{rw, rw, rw, rw} + // combination 19/32 + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 20/32 + case i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + http.Pusher + }{rw, rw, rw, 
rw, rw} + // combination 21/32 + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 22/32 + case i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 23/32 + case i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 24/32 + case i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 25/32 + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 26/32 + case i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 27/32 + case i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 28/32 + case i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 29/32 + case i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 30/32 + case i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 31/32 + case i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + // combination 32/32 + case i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +func (w *rw) Push(target string, opts 
*http.PushOptions) error { + f := w.w.(http.Pusher).Push + if w.h.Push != nil { + f = w.h.Push(f) + } + return f(target, opts) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go new file mode 100644 index 00000000000..e0951df1527 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -0,0 +1,278 @@ +// +build !go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + switch { + // combination 1/16 + case !i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/16 + case !i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 3/16 + case !i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 4/16 + case !i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 5/16 + case !i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 6/16 + case !i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 7/16 + case !i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 8/16 + case !i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 9/16 + case i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 10/16 + case i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 11/16 + case i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 12/16 + case i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 13/16 + case i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 14/16 + case i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 15/16 + case i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 16/16 + case i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := 
w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/go-kit/log/README.md b/vendor/github.com/go-kit/log/README.md index a0931951df8..8067794657c 100644 --- a/vendor/github.com/go-kit/log/README.md +++ b/vendor/github.com/go-kit/log/README.md @@ -1,5 +1,10 @@ # package log +[![Go Reference](https://pkg.go.dev/badge/github.com/go-kit/log.svg)](https://pkg.go.dev/github.com/go-kit/log) +[![Go Report Card](https://goreportcard.com/badge/go-kit/log)](https://goreportcard.com/report/go-kit/log) +[![GitHub Actions](https://github.com/go-kit/log/actions/workflows/test.yml/badge.svg)](https://github.com/go-kit/log/actions/workflows/test.yml) +[![Coverage Status](https://coveralls.io/repos/github/go-kit/log/badge.svg?branch=main)](https://coveralls.io/github/go-kit/log?branch=main) + `package log` provides a minimal interface for structured logging in services. It may be wrapped to encode conventions, enforce type-safety, provide leveled logging, and so on. It can be used for both typical application log events, diff --git a/vendor/github.com/go-kit/log/json_logger.go b/vendor/github.com/go-kit/log/json_logger.go index 0cedbf82478..d0faed4f098 100644 --- a/vendor/github.com/go-kit/log/json_logger.go +++ b/vendor/github.com/go-kit/log/json_logger.go @@ -68,7 +68,7 @@ func safeString(str fmt.Stringer) (s string) { if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { s = "NULL" } else { - panic(panicVal) + s = fmt.Sprintf("PANIC in String method: %v", panicVal) } } }() @@ -82,7 +82,7 @@ func safeError(err error) (s interface{}) { if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { s = nil } else { - panic(panicVal) + s = fmt.Sprintf("PANIC in Error method: %v", panicVal) } } }() diff --git a/vendor/github.com/go-kit/log/level/doc.go b/vendor/github.com/go-kit/log/level/doc.go index 505d307b11b..fd681dcf922 100644 --- a/vendor/github.com/go-kit/log/level/doc.go +++ b/vendor/github.com/go-kit/log/level/doc.go @@ -7,6 +7,17 @@ // logger = level.NewFilter(logger, level.AllowInfo()) // <-- // logger = log.With(logger, "ts", log.DefaultTimestampUTC) // +// It's also possible to configure log level from a string. For instance from +// a flag, environment variable or configuration file. +// +// fs := flag.NewFlagSet("myprogram") +// lvl := fs.String("log", "info", "debug, info, warn, error") +// +// var logger log.Logger +// logger = log.NewLogfmtLogger(os.Stderr) +// logger = level.NewFilter(logger, level.Allow(level.ParseDefault(*lvl, level.InfoValue()))) // <-- +// logger = log.With(logger, "ts", log.DefaultTimestampUTC) +// // Then, at the callsites, use one of the level.Debug, Info, Warn, or Error // helper methods to emit leveled log events. 
// diff --git a/vendor/github.com/go-kit/log/level/level.go b/vendor/github.com/go-kit/log/level/level.go index c94756c6bea..c641d985524 100644 --- a/vendor/github.com/go-kit/log/level/level.go +++ b/vendor/github.com/go-kit/log/level/level.go @@ -1,6 +1,14 @@ package level -import "github.com/go-kit/log" +import ( + "errors" + "strings" + + "github.com/go-kit/log" +) + +// ErrInvalidLevelString is returned whenever an invalid string is passed to Parse. +var ErrInvalidLevelString = errors.New("invalid level string") // Error returns a logger that includes a Key/ErrorValue pair. func Error(logger log.Logger) log.Logger { @@ -66,6 +74,22 @@ func (l *logger) Log(keyvals ...interface{}) error { // Option sets a parameter for the leveled logger. type Option func(*logger) +// Allow the provided log level to pass. +func Allow(v Value) Option { + switch v { + case debugValue: + return AllowDebug() + case infoValue: + return AllowInfo() + case warnValue: + return AllowWarn() + case errorValue: + return AllowError() + default: + return AllowNone() + } +} + // AllowAll is an alias for AllowDebug. func AllowAll() Option { return AllowDebug() @@ -100,6 +124,33 @@ func allowed(allowed level) Option { return func(l *logger) { l.allowed = allowed } } +// Parse a string to its corresponding level value. Valid strings are "debug", +// "info", "warn", and "error". Strings are normalized via strings.TrimSpace and +// strings.ToLower. +func Parse(level string) (Value, error) { + switch strings.TrimSpace(strings.ToLower(level)) { + case debugValue.name: + return debugValue, nil + case infoValue.name: + return infoValue, nil + case warnValue.name: + return warnValue, nil + case errorValue.name: + return errorValue, nil + default: + return nil, ErrInvalidLevelString + } +} + +// ParseDefault calls Parse and returns the default Value on error. +func ParseDefault(level string, def Value) Value { + v, err := Parse(level) + if err != nil { + return def + } + return v +} + // ErrNotAllowed sets the error to return from Log when it squelches a log // event disallowed by the configured Allow[Level] option. By default, // ErrNotAllowed is nil; in this case the log event is squelched with no diff --git a/vendor/github.com/go-kit/log/staticcheck.conf b/vendor/github.com/go-kit/log/staticcheck.conf new file mode 100644 index 00000000000..528438b97d2 --- /dev/null +++ b/vendor/github.com/go-kit/log/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all"] diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index ab593118131..a8c29bfbd53 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging without becoming coupled to a particular logging implementation. This is not @@ -73,6 +74,29 @@ received: If the Go standard library had defined an interface for logging, this project probably would not be needed. Alas, here we are. 
+When the Go developers started developing such an interface with +[slog](https://github.com/golang/go/issues/56345), they adopted some of the +logr design but also left out some parts and changed others: + +| Feature | logr | slog | +|---------|------|------| +| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) | +| Low-level API | `LogSink` | `Handler` | +| Stack unwinding | done by `LogSink` | done by `Logger` | +| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) | +| Generating a value for logging on demand | `Marshaler` | `LogValuer` | +| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" | +| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` | +| Passing logger via context | `NewContext`, `FromContext` | no API | +| Adding a name to a logger | `WithName` | no API | +| Modify verbosity of log entries in a call chain | `V` | no API | +| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | + +The high-level slog API is explicitly meant to be one of many different APIs +that can be layered on top of a shared `slog.Handler`. logr is one such +alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) +package. + ### Inspiration Before you consider this package, please read [this blog post by the @@ -118,6 +142,91 @@ There are implementations for the following logging libraries: - **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) - **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) +## slog interoperability + +Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` +and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and +`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. +As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level +slog API. `slogr` itself leaves that to the caller. + +## Using a `logr.Sink` as backend for slog + +Ideally, a logr sink implementation should support both logr and slog by +implementing both the normal logr interface(s) and `slogr.SlogSink`. Because +of a conflict in the parameters of the common `Enabled` method, it is [not +possible to implement both slog.Handler and logr.Sink in the same +type](https://github.com/golang/go/issues/59110). + +If both are supported, log calls can go from the high-level APIs to the backend +without the need to convert parameters. `NewLogr` and `NewSlogHandler` can +convert back and forth without adding additional wrappers, with one exception: +when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then +`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future +log calls. + +Such an implementation should also support values that implement specific +interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`, +`slog.GroupValue`). logr does not convert those. 
+ +Not supporting slog has several drawbacks: +- Recording source code locations works correctly if the handler gets called + through `slog.Logger`, but may be wrong in other cases. That's because a + `logr.Sink` does its own stack unwinding instead of using the program counter + provided by the high-level API. +- slog levels <= 0 can be mapped to logr levels by negating the level without a + loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as + used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink + because logr does not support "more important than info" levels. +- The slog group concept is supported by prefixing each key in a key/value + pair with the group names, separated by a dot. For structured output like + JSON it would be better to group the key/value pairs inside an object. +- Special slog values and interfaces don't work as expected. +- The overhead is likely to be higher. + +These drawbacks are severe enough that applications using a mixture of slog and +logr should switch to a different backend. + +## Using a `slog.Handler` as backend for logr + +Using a plain `slog.Handler` without support for logr works better than the +other direction: +- All logr verbosity levels can be mapped 1:1 to their corresponding slog level + by negating them. +- Stack unwinding is done by the `slogr.SlogSink` and the resulting program + counter is passed to the `slog.Handler`. +- Names added via `Logger.WithName` are gathered and recorded in an additional + attribute with `logger` as key and the names separated by slash as value. +- `Logger.Error` is turned into a log record with `slog.LevelError` as level + and an additional attribute with `err` as key, if an error was provided. + +The main drawback is that `logr.Marshaler` will not be supported. Types should +ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility +with logr implementations without slog support is not important, then +`slog.Valuer` is sufficient. + +## Context support for slog + +Storing a logger in a `context.Context` is not supported by +slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this +to fill this gap: + + func HandlerFromContext(ctx context.Context) slog.Handler { + logger, err := logr.FromContext(ctx) + if err == nil { + return slogr.NewSlogHandler(logger) + } + return slog.Default().Handler() + } + + func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context { + return logr.NewContext(ctx, slogr.NewLogr(handler)) + } + +The downside is that storing and retrieving a `slog.Handler` needs more +allocations compared to using a `logr.Logger`. Therefore the recommendation is +to use the `logr.Logger` API in code which uses contextual logging. + ## FAQ ### Conceptual @@ -241,7 +350,9 @@ Otherwise, you can start out with `0` as "you always want to see this", Then gradually choose levels in between as you need them, working your way down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs.) +info-type logs). For reference, slog pre-defines -4 for debug logs +(corresponds to 4 in logr), which matches what is +[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). #### How do I choose my keys? 
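To make the level guidance above concrete, the sketch below (again assuming Go 1.21+ and the slogr package; not part of the diff) shows the 1:1 mapping described earlier: logr verbosity N maps to slog level -N, so logr's V(4) debug-style logs line up with slog.LevelDebug.

    package main

    import (
        "log/slog"
        "os"

        "github.com/go-logr/logr/slogr"
    )

    func main() {
        // Let the handler accept everything down to slog.LevelDebug (-4).
        handler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
            Level: slog.LevelDebug,
        })
        logger := slogr.NewLogr(handler)

        logger.Info("important")         // logr V(0) -> slog level 0 (Info)
        logger.V(4).Info("debug detail") // logr V(4) -> slog level -4 (Debug)
        logger.V(5).Info("trace detail") // dropped: below the handler's minimum level
    }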
diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md new file mode 100644 index 00000000000..1ca756fc7b3 --- /dev/null +++ b/vendor/github.com/go-logr/logr/SECURITY.md @@ -0,0 +1,18 @@ +# Security Policy + +If you have discovered a security vulnerability in this project, please report it +privately. **Do not disclose it as a public issue.** This gives us time to work with you +to fix the issue before public exposure, reducing the chance that the exploit will be +used before a patch is released. + +You may submit the report in the following ways: + +- send an email to go-logr-security@googlegroups.com +- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new) + +Please provide the following information in your report: + +- A description of the vulnerability and its impact +- How to reproduce the issue + +We ask that you give us 90 days to work on a fix before public exposure. diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index e52f0cd01e2..12e5807cc5c 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -116,17 +116,17 @@ type Options struct { // Equivalent hooks are offered for key-value pairs saved via // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and // for user-provided pairs (see RenderArgsHook). - RenderBuiltinsHook func(kvList []interface{}) []interface{} + RenderBuiltinsHook func(kvList []any) []any // RenderValuesHook is the same as RenderBuiltinsHook, except that it is // only called for key-value pairs saved via logr.Logger.WithValues. See // RenderBuiltinsHook for more details. - RenderValuesHook func(kvList []interface{}) []interface{} + RenderValuesHook func(kvList []any) []any // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only // called for key-value pairs passed directly to Info and Error. See // RenderBuiltinsHook for more details. - RenderArgsHook func(kvList []interface{}) []interface{} + RenderArgsHook func(kvList []any) []any // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct // that contains a struct, etc.) it may log. Every time it finds a struct, @@ -163,7 +163,7 @@ func (l fnlogger) WithName(name string) logr.LogSink { return &l } -func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { +func (l fnlogger) WithValues(kvList ...any) logr.LogSink { l.Formatter.AddValues(kvList) return &l } @@ -173,12 +173,12 @@ func (l fnlogger) WithCallDepth(depth int) logr.LogSink { return &l } -func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { +func (l fnlogger) Info(level int, msg string, kvList ...any) { prefix, args := l.FormatInfo(level, msg, kvList) l.write(prefix, args) } -func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { +func (l fnlogger) Error(err error, msg string, kvList ...any) { prefix, args := l.FormatError(err, msg, kvList) l.write(prefix, args) } @@ -229,7 +229,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { type Formatter struct { outputFormat outputFormat prefix string - values []interface{} + values []any valuesStr string depth int opts *Options @@ -246,10 +246,10 @@ const ( ) // PseudoStruct is a list of key-value pairs that gets logged as a struct. -type PseudoStruct []interface{} +type PseudoStruct []any // render produces a log line, ready to use. 
-func (f Formatter) render(builtins, args []interface{}) string { +func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. buf := bytes.NewBuffer(make([]byte, 0, 1024)) if f.outputFormat == outputJSON { @@ -292,7 +292,7 @@ func (f Formatter) render(builtins, args []interface{}) string { // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { @@ -334,7 +334,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b return kvList } -func (f Formatter) pretty(value interface{}) string { +func (f Formatter) pretty(value any) string { return f.prettyWithFlags(value, 0, 0) } @@ -343,7 +343,7 @@ const ( ) // TODO: This is not fast. Most of the overhead goes here. -func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { +func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { if depth > f.opts.MaxLogDepth { return `""` } @@ -614,7 +614,7 @@ func isEmpty(v reflect.Value) bool { return false } -func invokeMarshaler(m logr.Marshaler) (ret interface{}) { +func invokeMarshaler(m logr.Marshaler) (ret any) { defer func() { if r := recover(); r != nil { ret = fmt.Sprintf("", r) @@ -675,12 +675,12 @@ func (f Formatter) caller() Caller { const noValue = "" -func (f Formatter) nonStringKey(v interface{}) string { +func (f Formatter) nonStringKey(v any) string { return fmt.Sprintf("", f.snippet(v)) } // snippet produces a short snippet string of an arbitrary value. -func (f Formatter) snippet(v interface{}) string { +func (f Formatter) snippet(v any) string { const snipLen = 16 snip := f.pretty(v) @@ -693,7 +693,7 @@ func (f Formatter) snippet(v interface{}) string { // sanitize ensures that a list of key-value pairs has a value for every key // (adding a value if needed) and that each key is a string (substituting a key // if needed). -func (f Formatter) sanitize(kvList []interface{}) []interface{} { +func (f Formatter) sanitize(kvList []any) []any { if len(kvList)%2 != 0 { kvList = append(kvList, noValue) } @@ -727,8 +727,8 @@ func (f Formatter) GetDepth() int { // FormatInfo renders an Info log message into strings. The prefix will be // empty when no names were set (via AddNames), or when the output is // configured for JSON. -func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf +func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf prefix = f.prefix if f.outputFormat == outputJSON { args = append(args, "logger", prefix) @@ -745,10 +745,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (pref } // FormatError renders an Error log message into strings. 
The prefix will be -// empty when no names were set (via AddNames), or when the output is +// empty when no names were set (via AddNames), or when the output is // configured for JSON. -func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf +func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf prefix = f.prefix if f.outputFormat == outputJSON { args = append(args, "logger", prefix) @@ -761,12 +761,12 @@ func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (pre args = append(args, "caller", f.caller()) } args = append(args, "msg", msg) - var loggableErr interface{} + var loggableErr any if err != nil { loggableErr = err.Error() } args = append(args, "error", loggableErr) - return f.prefix, f.render(args, kvList) + return prefix, f.render(args, kvList) } // AddName appends the specified name. funcr uses '/' characters to separate @@ -781,7 +781,7 @@ func (f *Formatter) AddName(name string) { // AddValues adds key-value pairs to the set of saved values to be logged with // each log line. -func (f *Formatter) AddValues(kvList []interface{}) { +func (f *Formatter) AddValues(kvList []any) { // Three slice args forces a copy. n := len(f.values) f.values = append(f.values[:n:n], kvList...) diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index e027aea3fd3..2a5075a180f 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -127,9 +127,9 @@ limitations under the License. // such a value can call its methods without having to check whether the // instance is ready for use. // -// Calling methods with the null logger (Logger{}) as instance will crash -// because it has no LogSink. Therefore this null logger should never be passed -// around. For cases where passing a logger is optional, a pointer to Logger +// The zero logger (= Logger{}) is identical to Discard() and discards all log +// entries. Code that receives a Logger by value can simply call it, the methods +// will never crash. For cases where passing a logger is optional, a pointer to Logger // should be used. // // # Key Naming Conventions @@ -258,6 +258,12 @@ type Logger struct { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { + // Some implementations of LogSink look at the caller in Enabled (e.g. + // different verbosity levels per package or file), but we only pass one + // CallDepth in (via Init). This means that all calls from Logger to the + // LogSink's Enabled, Info, and Error methods must have the same number of + // frames. In other words, Logger methods can't call other Logger methods + // which call these LogSink methods unless we do it the same in all paths. return l.sink != nil && l.sink.Enabled(l.level) } @@ -267,11 +273,11 @@ func (l Logger) Enabled() bool { // line. The key/value pairs can then be used to add additional variable // information. The key/value pairs must alternate string keys and arbitrary // values. 
-func (l Logger) Info(msg string, keysAndValues ...interface{}) { +func (l Logger) Info(msg string, keysAndValues ...any) { if l.sink == nil { return } - if l.Enabled() { + if l.sink.Enabled(l.level) { // see comment in Enabled if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -289,7 +295,7 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // while the err argument should be used to attach the actual error that // triggered this log line, if present. The err parameter is optional // and nil may be passed instead of an error instance. -func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { +func (l Logger) Error(err error, msg string, keysAndValues ...any) { if l.sink == nil { return } @@ -314,9 +320,16 @@ func (l Logger) V(level int) Logger { return l } +// GetV returns the verbosity level of the logger. If the logger's LogSink is +// nil as in the Discard logger, this will always return 0. +func (l Logger) GetV() int { + // 0 if l.sink nil because of the if check in V above. + return l.level +} + // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. -func (l Logger) WithValues(keysAndValues ...interface{}) Logger { +func (l Logger) WithValues(keysAndValues ...any) Logger { if l.sink == nil { return l } @@ -467,15 +480,15 @@ type LogSink interface { // The level argument is provided for optional logging. This method will // only be called when Enabled(level) is true. See Logger.Info for more // details. - Info(level int, msg string, keysAndValues ...interface{}) + Info(level int, msg string, keysAndValues ...any) // Error logs an error, with the given message and key/value pairs as // context. See Logger.Error for more details. - Error(err error, msg string, keysAndValues ...interface{}) + Error(err error, msg string, keysAndValues ...any) // WithValues returns a new LogSink with additional key/value pairs. See // Logger.WithValues for more details. - WithValues(keysAndValues ...interface{}) LogSink + WithValues(keysAndValues ...any) LogSink // WithName returns a new LogSink with the specified name appended. See // Logger.WithName for more details. @@ -546,5 +559,5 @@ type Marshaler interface { // with exported fields // // It may return any value of any type. - MarshalLog() interface{} + MarshalLog() any } diff --git a/vendor/github.com/jellydator/ttlcache/v3/cache.go b/vendor/github.com/jellydator/ttlcache/v3/cache.go index 93943da226b..a1547fcb286 100644 --- a/vendor/github.com/jellydator/ttlcache/v3/cache.go +++ b/vendor/github.com/jellydator/ttlcache/v3/cache.go @@ -478,6 +478,13 @@ func (c *Cache[K, V]) Items() map[K]*Item[K, V] { // Range stops the iteration. 
func (c *Cache[K, V]) Range(fn func(item *Item[K, V]) bool) { c.items.mu.RLock() + + // Check if cache is empty + if c.items.lru.Len() == 0 { + c.items.mu.RUnlock() + return + } + for item := c.items.lru.Front(); item != c.items.lru.Back().Next(); item = item.Next() { i := item.Value.(*Item[K, V]) c.items.mu.RUnlock() diff --git a/vendor/github.com/letsencrypt/boulder/core/challenges.go b/vendor/github.com/letsencrypt/boulder/core/challenges.go index 4b4a67c4868..1d7e2408d51 100644 --- a/vendor/github.com/letsencrypt/boulder/core/challenges.go +++ b/vendor/github.com/letsencrypt/boulder/core/challenges.go @@ -1,5 +1,7 @@ package core +import "fmt" + func newChallenge(challengeType AcmeChallenge, token string) Challenge { return Challenge{ Type: challengeType, @@ -25,3 +27,19 @@ func DNSChallenge01(token string) Challenge { func TLSALPNChallenge01(token string) Challenge { return newChallenge(ChallengeTypeTLSALPN01, token) } + +// NewChallenge constructs a random challenge of the given kind. It returns an +// error if the challenge type is unrecognized. If token is empty a random token +// will be generated, otherwise the provided token is used. +func NewChallenge(kind AcmeChallenge, token string) (Challenge, error) { + switch kind { + case ChallengeTypeHTTP01: + return HTTPChallenge01(token), nil + case ChallengeTypeDNS01: + return DNSChallenge01(token), nil + case ChallengeTypeTLSALPN01: + return TLSALPNChallenge01(token), nil + default: + return Challenge{}, fmt.Errorf("unrecognized challenge type %q", kind) + } +} diff --git a/vendor/github.com/letsencrypt/boulder/core/interfaces.go b/vendor/github.com/letsencrypt/boulder/core/interfaces.go index 6846bf88807..003329c3f55 100644 --- a/vendor/github.com/letsencrypt/boulder/core/interfaces.go +++ b/vendor/github.com/letsencrypt/boulder/core/interfaces.go @@ -7,7 +7,8 @@ import ( // PolicyAuthority defines the public interface for the Boulder PA // TODO(#5891): Move this interface to a more appropriate location. type PolicyAuthority interface { - WillingToIssueWildcards(identifiers []identifier.ACMEIdentifier) error - ChallengesFor(domain identifier.ACMEIdentifier) ([]Challenge, error) - ChallengeTypeEnabled(t AcmeChallenge) bool + WillingToIssueWildcards([]identifier.ACMEIdentifier) error + ChallengesFor(identifier.ACMEIdentifier) ([]Challenge, error) + ChallengeTypeEnabled(AcmeChallenge) bool + CheckAuthz(*Authorization) error } diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go index 47ba4b70c16..b52f0f5e0ab 100644 --- a/vendor/github.com/letsencrypt/boulder/core/objects.go +++ b/vendor/github.com/letsencrypt/boulder/core/objects.go @@ -2,7 +2,6 @@ package core import ( "crypto" - "crypto/x509" "encoding/base64" "encoding/json" "fmt" @@ -12,7 +11,7 @@ import ( "time" "golang.org/x/crypto/ocsp" - "gopkg.in/square/go-jose.v2" + "gopkg.in/go-jose/go-jose.v2" "github.com/letsencrypt/boulder/identifier" "github.com/letsencrypt/boulder/probs" @@ -53,7 +52,6 @@ const ( type AcmeChallenge string // These types are the available challenges -// TODO(#5009): Make this a custom type as well. const ( ChallengeTypeHTTP01 = AcmeChallenge("http-01") ChallengeTypeDNS01 = AcmeChallenge("dns-01") @@ -77,54 +75,25 @@ type OCSPStatus string const ( OCSPStatusGood = OCSPStatus("good") OCSPStatusRevoked = OCSPStatus("revoked") + // Not a real OCSP status. 
This is a placeholder we write before the + // actual precertificate is issued, to ensure we never return "good" before + // issuance succeeds, for BR compliance reasons. + OCSPStatusNotReady = OCSPStatus("wait") ) var OCSPStatusToInt = map[OCSPStatus]int{ - OCSPStatusGood: ocsp.Good, - OCSPStatusRevoked: ocsp.Revoked, + OCSPStatusGood: ocsp.Good, + OCSPStatusRevoked: ocsp.Revoked, + OCSPStatusNotReady: -1, } // DNSPrefix is attached to DNS names in DNS challenges const DNSPrefix = "_acme-challenge" -// CertificateRequest is just a CSR -// -// This data is unmarshalled from JSON by way of RawCertificateRequest, which -// represents the actual structure received from the client. -type CertificateRequest struct { - CSR *x509.CertificateRequest // The CSR - Bytes []byte // The original bytes of the CSR, for logging. -} - type RawCertificateRequest struct { CSR JSONBuffer `json:"csr"` // The encoded CSR } -// UnmarshalJSON provides an implementation for decoding CertificateRequest objects. -func (cr *CertificateRequest) UnmarshalJSON(data []byte) error { - var raw RawCertificateRequest - err := json.Unmarshal(data, &raw) - if err != nil { - return err - } - - csr, err := x509.ParseCertificateRequest(raw.CSR) - if err != nil { - return err - } - - cr.CSR = csr - cr.Bytes = raw.CSR - return nil -} - -// MarshalJSON provides an implementation for encoding CertificateRequest objects. -func (cr CertificateRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(RawCertificateRequest{ - CSR: cr.CSR.Raw, - }) -} - // Registration objects represent non-public metadata attached // to account keys. type Registration struct { @@ -156,7 +125,7 @@ type ValidationRecord struct { URL string `json:"url,omitempty"` // Shared - Hostname string `json:"hostname"` + Hostname string `json:"hostname,omitempty"` Port string `json:"port,omitempty"` AddressesResolved []net.IP `json:"addressesResolved,omitempty"` AddressUsed net.IP `json:"addressUsed,omitempty"` @@ -373,14 +342,18 @@ type Authorization struct { // slice and the order of these challenges may not be predictable. Challenges []Challenge `json:"challenges,omitempty" db:"-"` - // This field is deprecated. It's filled in by WFE for the ACMEv1 API. - Combinations [][]int `json:"combinations,omitempty" db:"combinations"` - - // Wildcard is a Boulder-specific Authorization field that indicates the - // authorization was created as a result of an order containing a name with - // a `*.`wildcard prefix. This will help convey to users that an - // Authorization with the identifier `example.com` and one DNS-01 challenge - // corresponds to a name `*.example.com` from an associated order. + // https://datatracker.ietf.org/doc/html/rfc8555#page-29 + // + // wildcard (optional, boolean): This field MUST be present and true + // for authorizations created as a result of a newOrder request + // containing a DNS identifier with a value that was a wildcard + // domain name. For other authorizations, it MUST be absent. + // Wildcard domain names are described in Section 7.1.3. + // + // This is not represented in the database because we calculate it from + // the identifier stored in the database. Unlike the identifier returned + // as part of the authorization, the identifier we store in the database + // can contain an asterisk. 
Wildcard bool `json:"wildcard,omitempty" db:"-"` } @@ -399,38 +372,25 @@ func (authz *Authorization) FindChallengeByStringID(id string) int { // SolvedBy will look through the Authorizations challenges, returning the type // of the *first* challenge it finds with Status: valid, or an error if no // challenge is valid. -func (authz *Authorization) SolvedBy() (*AcmeChallenge, error) { +func (authz *Authorization) SolvedBy() (AcmeChallenge, error) { if len(authz.Challenges) == 0 { - return nil, fmt.Errorf("Authorization has no challenges") + return "", fmt.Errorf("Authorization has no challenges") } for _, chal := range authz.Challenges { if chal.Status == StatusValid { - return &chal.Type, nil + return chal.Type, nil } } - return nil, fmt.Errorf("Authorization not solved by any challenge") + return "", fmt.Errorf("Authorization not solved by any challenge") } // JSONBuffer fields get encoded and decoded JOSE-style, in base64url encoding // with stripped padding. type JSONBuffer []byte -// URL-safe base64 encode that strips padding -func base64URLEncode(data []byte) string { - var result = base64.URLEncoding.EncodeToString(data) - return strings.TrimRight(result, "=") -} - -// URL-safe base64 decoder that adds padding -func base64URLDecode(data string) ([]byte, error) { - var missing = (4 - len(data)%4) % 4 - data += strings.Repeat("=", missing) - return base64.URLEncoding.DecodeString(data) -} - // MarshalJSON encodes a JSONBuffer for transmission. func (jb JSONBuffer) MarshalJSON() (result []byte, err error) { - return json.Marshal(base64URLEncode(jb)) + return json.Marshal(base64.RawURLEncoding.EncodeToString(jb)) } // UnmarshalJSON decodes a JSONBuffer to an object. @@ -440,7 +400,7 @@ func (jb *JSONBuffer) UnmarshalJSON(data []byte) (err error) { if err != nil { return err } - *jb, err = base64URLDecode(str) + *jb, err = base64.RawURLEncoding.DecodeString(strings.TrimRight(str, "=")) return } @@ -458,53 +418,46 @@ type Certificate struct { } // CertificateStatus structs are internal to the server. They represent the -// latest data about the status of the certificate, required for OCSP updating -// and for validating that the subscriber has accepted the certificate. +// latest data about the status of the certificate, required for generating new +// OCSP responses and determining if a certificate has been revoked. type CertificateStatus struct { ID int64 `db:"id"` Serial string `db:"serial"` // status: 'good' or 'revoked'. Note that good, expired certificates remain - // with status 'good' but don't necessarily get fresh OCSP responses. + // with status 'good' but don't necessarily get fresh OCSP responses. Status OCSPStatus `db:"status"` // ocspLastUpdated: The date and time of the last time we generated an OCSP - // response. If we have never generated one, this has the zero value of - // time.Time, i.e. Jan 1 1970. + // response. If we have never generated one, this has the zero value of + // time.Time, i.e. Jan 1 1970. OCSPLastUpdated time.Time `db:"ocspLastUpdated"` // revokedDate: If status is 'revoked', this is the date and time it was - // revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970. + // revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970. RevokedDate time.Time `db:"revokedDate"` // revokedReason: If status is 'revoked', this is the reason code for the - // revocation. Otherwise it is zero (which happens to be the reason - // code for 'unspecified'). + // revocation. 
Otherwise it is zero (which happens to be the reason + // code for 'unspecified'). RevokedReason revocation.Reason `db:"revokedReason"` LastExpirationNagSent time.Time `db:"lastExpirationNagSent"` - // The encoded and signed OCSP response. - OCSPResponse []byte `db:"ocspResponse"` - - // For performance reasons[0] we duplicate the `Expires` field of the - // `Certificates` object/table in `CertificateStatus` to avoid a costly `JOIN` - // later on just to retrieve this `Time` value. This helps both the OCSP - // updater and the expiration-mailer stay performant. - // - // Similarly, we add an explicit `IsExpired` boolean to `CertificateStatus` - // table that the OCSP updater so that the database can create a meaningful - // index on `(isExpired, ocspLastUpdated)` without a `JOIN` on `certificates`. - // For more detail see Boulder #1864[0]. - // - // [0]: https://github.com/letsencrypt/boulder/issues/1864 + // NotAfter and IsExpired are convenience columns which allow expensive + // queries to quickly filter out certificates that we don't need to care about + // anymore. These are particularly useful for the expiration mailer and CRL + // updater. See https://github.com/letsencrypt/boulder/issues/1864. NotAfter time.Time `db:"notAfter"` IsExpired bool `db:"isExpired"` - // TODO(#5152): Change this to an issuance.Issuer(Name)ID after it no longer - // has to support both IssuerNameIDs and IssuerIDs. - IssuerID int64 + // Note: this is not an issuance.IssuerNameID because that would create an + // import cycle between core and issuance. + // Note2: This field used to be called `issuerID`. We keep the old name in + // the DB, but update the Go field name to be clear which type of ID this + // is. + IssuerNameID int64 `db:"issuerID"` } // FQDNSet contains the SHA256 hash of the lowercased, comma joined dNSNames @@ -553,7 +506,7 @@ func RenewalInfoSimple(issued time.Time, expires time.Time) RenewalInfo { } // RenewalInfoImmediate constructs a `RenewalInfo` object with a suggested -// window in the past. Per the draft-ietf-acme-ari-00 spec, clients should +// window in the past. Per the draft-ietf-acme-ari-01 spec, clients should // attempt to renew immediately if the suggested window is in the past. The // passed `now` is assumed to be a timestamp representing the current moment in // time. diff --git a/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go b/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go deleted file mode 100644 index 02b3515bdcd..00000000000 --- a/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go +++ /dev/null @@ -1,1182 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.20.1 -// source: core.proto - -package proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Challenge struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` - Status string `protobuf:"bytes,6,opt,name=status,proto3" json:"status,omitempty"` - Uri string `protobuf:"bytes,9,opt,name=uri,proto3" json:"uri,omitempty"` - Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` - KeyAuthorization string `protobuf:"bytes,5,opt,name=keyAuthorization,proto3" json:"keyAuthorization,omitempty"` - Validationrecords []*ValidationRecord `protobuf:"bytes,10,rep,name=validationrecords,proto3" json:"validationrecords,omitempty"` - Error *ProblemDetails `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` - Validated int64 `protobuf:"varint,11,opt,name=validated,proto3" json:"validated,omitempty"` -} - -func (x *Challenge) Reset() { - *x = Challenge{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Challenge) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Challenge) ProtoMessage() {} - -func (x *Challenge) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Challenge.ProtoReflect.Descriptor instead. -func (*Challenge) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{0} -} - -func (x *Challenge) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *Challenge) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Challenge) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *Challenge) GetUri() string { - if x != nil { - return x.Uri - } - return "" -} - -func (x *Challenge) GetToken() string { - if x != nil { - return x.Token - } - return "" -} - -func (x *Challenge) GetKeyAuthorization() string { - if x != nil { - return x.KeyAuthorization - } - return "" -} - -func (x *Challenge) GetValidationrecords() []*ValidationRecord { - if x != nil { - return x.Validationrecords - } - return nil -} - -func (x *Challenge) GetError() *ProblemDetails { - if x != nil { - return x.Error - } - return nil -} - -func (x *Challenge) GetValidated() int64 { - if x != nil { - return x.Validated - } - return 0 -} - -type ValidationRecord struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` - Port string `protobuf:"bytes,2,opt,name=port,proto3" json:"port,omitempty"` - AddressesResolved [][]byte `protobuf:"bytes,3,rep,name=addressesResolved,proto3" json:"addressesResolved,omitempty"` // net.IP.MarshalText() - AddressUsed []byte `protobuf:"bytes,4,opt,name=addressUsed,proto3" json:"addressUsed,omitempty"` // net.IP.MarshalText() - Authorities []string `protobuf:"bytes,5,rep,name=authorities,proto3" json:"authorities,omitempty"` - Url string `protobuf:"bytes,6,opt,name=url,proto3" json:"url,omitempty"` - // A list of addresses tried before the address used (see - // 
core/objects.go and the comment on the ValidationRecord structure - // definition for more information. - AddressesTried [][]byte `protobuf:"bytes,7,rep,name=addressesTried,proto3" json:"addressesTried,omitempty"` // net.IP.MarshalText() -} - -func (x *ValidationRecord) Reset() { - *x = ValidationRecord{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidationRecord) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidationRecord) ProtoMessage() {} - -func (x *ValidationRecord) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidationRecord.ProtoReflect.Descriptor instead. -func (*ValidationRecord) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{1} -} - -func (x *ValidationRecord) GetHostname() string { - if x != nil { - return x.Hostname - } - return "" -} - -func (x *ValidationRecord) GetPort() string { - if x != nil { - return x.Port - } - return "" -} - -func (x *ValidationRecord) GetAddressesResolved() [][]byte { - if x != nil { - return x.AddressesResolved - } - return nil -} - -func (x *ValidationRecord) GetAddressUsed() []byte { - if x != nil { - return x.AddressUsed - } - return nil -} - -func (x *ValidationRecord) GetAuthorities() []string { - if x != nil { - return x.Authorities - } - return nil -} - -func (x *ValidationRecord) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *ValidationRecord) GetAddressesTried() [][]byte { - if x != nil { - return x.AddressesTried - } - return nil -} - -type ProblemDetails struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ProblemType string `protobuf:"bytes,1,opt,name=problemType,proto3" json:"problemType,omitempty"` - Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` - HttpStatus int32 `protobuf:"varint,3,opt,name=httpStatus,proto3" json:"httpStatus,omitempty"` -} - -func (x *ProblemDetails) Reset() { - *x = ProblemDetails{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ProblemDetails) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProblemDetails) ProtoMessage() {} - -func (x *ProblemDetails) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProblemDetails.ProtoReflect.Descriptor instead. 
-func (*ProblemDetails) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{2} -} - -func (x *ProblemDetails) GetProblemType() string { - if x != nil { - return x.ProblemType - } - return "" -} - -func (x *ProblemDetails) GetDetail() string { - if x != nil { - return x.Detail - } - return "" -} - -func (x *ProblemDetails) GetHttpStatus() int32 { - if x != nil { - return x.HttpStatus - } - return 0 -} - -type Certificate struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"` - Digest string `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"` - Der []byte `protobuf:"bytes,4,opt,name=der,proto3" json:"der,omitempty"` - Issued int64 `protobuf:"varint,5,opt,name=issued,proto3" json:"issued,omitempty"` // Unix timestamp (nanoseconds) - Expires int64 `protobuf:"varint,6,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds) -} - -func (x *Certificate) Reset() { - *x = Certificate{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Certificate) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Certificate) ProtoMessage() {} - -func (x *Certificate) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Certificate.ProtoReflect.Descriptor instead. 
-func (*Certificate) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{3} -} - -func (x *Certificate) GetRegistrationID() int64 { - if x != nil { - return x.RegistrationID - } - return 0 -} - -func (x *Certificate) GetSerial() string { - if x != nil { - return x.Serial - } - return "" -} - -func (x *Certificate) GetDigest() string { - if x != nil { - return x.Digest - } - return "" -} - -func (x *Certificate) GetDer() []byte { - if x != nil { - return x.Der - } - return nil -} - -func (x *Certificate) GetIssued() int64 { - if x != nil { - return x.Issued - } - return 0 -} - -func (x *Certificate) GetExpires() int64 { - if x != nil { - return x.Expires - } - return 0 -} - -type CertificateStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` - Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` - OcspLastUpdated int64 `protobuf:"varint,4,opt,name=ocspLastUpdated,proto3" json:"ocspLastUpdated,omitempty"` - RevokedDate int64 `protobuf:"varint,5,opt,name=revokedDate,proto3" json:"revokedDate,omitempty"` - RevokedReason int64 `protobuf:"varint,6,opt,name=revokedReason,proto3" json:"revokedReason,omitempty"` - LastExpirationNagSent int64 `protobuf:"varint,7,opt,name=lastExpirationNagSent,proto3" json:"lastExpirationNagSent,omitempty"` - OcspResponse []byte `protobuf:"bytes,8,opt,name=ocspResponse,proto3" json:"ocspResponse,omitempty"` - NotAfter int64 `protobuf:"varint,9,opt,name=notAfter,proto3" json:"notAfter,omitempty"` - IsExpired bool `protobuf:"varint,10,opt,name=isExpired,proto3" json:"isExpired,omitempty"` - IssuerID int64 `protobuf:"varint,11,opt,name=issuerID,proto3" json:"issuerID,omitempty"` -} - -func (x *CertificateStatus) Reset() { - *x = CertificateStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CertificateStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CertificateStatus) ProtoMessage() {} - -func (x *CertificateStatus) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CertificateStatus.ProtoReflect.Descriptor instead. 
-func (*CertificateStatus) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{4} -} - -func (x *CertificateStatus) GetSerial() string { - if x != nil { - return x.Serial - } - return "" -} - -func (x *CertificateStatus) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *CertificateStatus) GetOcspLastUpdated() int64 { - if x != nil { - return x.OcspLastUpdated - } - return 0 -} - -func (x *CertificateStatus) GetRevokedDate() int64 { - if x != nil { - return x.RevokedDate - } - return 0 -} - -func (x *CertificateStatus) GetRevokedReason() int64 { - if x != nil { - return x.RevokedReason - } - return 0 -} - -func (x *CertificateStatus) GetLastExpirationNagSent() int64 { - if x != nil { - return x.LastExpirationNagSent - } - return 0 -} - -func (x *CertificateStatus) GetOcspResponse() []byte { - if x != nil { - return x.OcspResponse - } - return nil -} - -func (x *CertificateStatus) GetNotAfter() int64 { - if x != nil { - return x.NotAfter - } - return 0 -} - -func (x *CertificateStatus) GetIsExpired() bool { - if x != nil { - return x.IsExpired - } - return false -} - -func (x *CertificateStatus) GetIssuerID() int64 { - if x != nil { - return x.IssuerID - } - return 0 -} - -type Registration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Contact []string `protobuf:"bytes,3,rep,name=contact,proto3" json:"contact,omitempty"` - ContactsPresent bool `protobuf:"varint,4,opt,name=contactsPresent,proto3" json:"contactsPresent,omitempty"` - Agreement string `protobuf:"bytes,5,opt,name=agreement,proto3" json:"agreement,omitempty"` - InitialIP []byte `protobuf:"bytes,6,opt,name=initialIP,proto3" json:"initialIP,omitempty"` - CreatedAt int64 `protobuf:"varint,7,opt,name=createdAt,proto3" json:"createdAt,omitempty"` // Unix timestamp (nanoseconds) - Status string `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"` -} - -func (x *Registration) Reset() { - *x = Registration{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Registration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Registration) ProtoMessage() {} - -func (x *Registration) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Registration.ProtoReflect.Descriptor instead. 
-func (*Registration) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{5} -} - -func (x *Registration) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *Registration) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} - -func (x *Registration) GetContact() []string { - if x != nil { - return x.Contact - } - return nil -} - -func (x *Registration) GetContactsPresent() bool { - if x != nil { - return x.ContactsPresent - } - return false -} - -func (x *Registration) GetAgreement() string { - if x != nil { - return x.Agreement - } - return "" -} - -func (x *Registration) GetInitialIP() []byte { - if x != nil { - return x.InitialIP - } - return nil -} - -func (x *Registration) GetCreatedAt() int64 { - if x != nil { - return x.CreatedAt - } - return 0 -} - -func (x *Registration) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -type Authorization struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Identifier string `protobuf:"bytes,2,opt,name=identifier,proto3" json:"identifier,omitempty"` - RegistrationID int64 `protobuf:"varint,3,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` - Expires int64 `protobuf:"varint,5,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds) - Challenges []*Challenge `protobuf:"bytes,6,rep,name=challenges,proto3" json:"challenges,omitempty"` -} - -func (x *Authorization) Reset() { - *x = Authorization{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Authorization) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Authorization) ProtoMessage() {} - -func (x *Authorization) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Authorization.ProtoReflect.Descriptor instead. 
-func (*Authorization) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{6} -} - -func (x *Authorization) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *Authorization) GetIdentifier() string { - if x != nil { - return x.Identifier - } - return "" -} - -func (x *Authorization) GetRegistrationID() int64 { - if x != nil { - return x.RegistrationID - } - return 0 -} - -func (x *Authorization) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *Authorization) GetExpires() int64 { - if x != nil { - return x.Expires - } - return 0 -} - -func (x *Authorization) GetChallenges() []*Challenge { - if x != nil { - return x.Challenges - } - return nil -} - -type Order struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Expires int64 `protobuf:"varint,3,opt,name=expires,proto3" json:"expires,omitempty"` - Error *ProblemDetails `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` - CertificateSerial string `protobuf:"bytes,5,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"` - Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"` - Names []string `protobuf:"bytes,8,rep,name=names,proto3" json:"names,omitempty"` - BeganProcessing bool `protobuf:"varint,9,opt,name=beganProcessing,proto3" json:"beganProcessing,omitempty"` - Created int64 `protobuf:"varint,10,opt,name=created,proto3" json:"created,omitempty"` - V2Authorizations []int64 `protobuf:"varint,11,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"` -} - -func (x *Order) Reset() { - *x = Order{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Order) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Order) ProtoMessage() {} - -func (x *Order) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Order.ProtoReflect.Descriptor instead. 
-func (*Order) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{7} -} - -func (x *Order) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *Order) GetRegistrationID() int64 { - if x != nil { - return x.RegistrationID - } - return 0 -} - -func (x *Order) GetExpires() int64 { - if x != nil { - return x.Expires - } - return 0 -} - -func (x *Order) GetError() *ProblemDetails { - if x != nil { - return x.Error - } - return nil -} - -func (x *Order) GetCertificateSerial() string { - if x != nil { - return x.CertificateSerial - } - return "" -} - -func (x *Order) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *Order) GetNames() []string { - if x != nil { - return x.Names - } - return nil -} - -func (x *Order) GetBeganProcessing() bool { - if x != nil { - return x.BeganProcessing - } - return false -} - -func (x *Order) GetCreated() int64 { - if x != nil { - return x.Created - } - return 0 -} - -func (x *Order) GetV2Authorizations() []int64 { - if x != nil { - return x.V2Authorizations - } - return nil -} - -type CRLEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` - Reason int32 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"` - RevokedAt int64 `protobuf:"varint,3,opt,name=revokedAt,proto3" json:"revokedAt,omitempty"` // Unix timestamp (nanoseconds) -} - -func (x *CRLEntry) Reset() { - *x = CRLEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CRLEntry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CRLEntry) ProtoMessage() {} - -func (x *CRLEntry) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CRLEntry.ProtoReflect.Descriptor instead. 
-func (*CRLEntry) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{8} -} - -func (x *CRLEntry) GetSerial() string { - if x != nil { - return x.Serial - } - return "" -} - -func (x *CRLEntry) GetReason() int32 { - if x != nil { - return x.Reason - } - return 0 -} - -func (x *CRLEntry) GetRevokedAt() int64 { - if x != nil { - return x.RevokedAt - } - return 0 -} - -var File_core_proto protoreflect.FileDescriptor - -var file_core_proto_rawDesc = []byte{ - 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x6f, - 0x72, 0x65, 0x22, 0xab, 0x02, 0x0a, 0x09, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x03, - 0x75, 0x72, 0x69, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x14, - 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, - 0x6b, 0x65, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x44, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x72, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, - 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, - 0x22, 0xee, 0x01, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, - 0x76, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x55, 0x73, - 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x74, 0x69, 
0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69, 0x65, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, - 0x0c, 0x52, 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69, 0x65, - 0x64, 0x22, 0x6a, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65, - 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x1e, 0x0a, - 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xa9, 0x01, - 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, - 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, - 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, - 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x12, - 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x22, 0xeb, 0x02, 0x0a, 0x11, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x28, 0x0a, 0x0f, 0x6f, 0x63, 0x73, 0x70, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x6f, 0x63, 0x73, 0x70, 0x4c, 0x61, - 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x76, - 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, - 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x72, - 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, - 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x61, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x22, 
0x0a, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f, - 0x63, 0x73, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6e, - 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e, - 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x45, 0x78, 0x70, - 0x69, 0x72, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x45, 0x78, - 0x70, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, - 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, - 0x44, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0xe6, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, - 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x12, 0x1c, - 0x0a, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x50, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x09, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x50, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x22, 0xd6, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x0a, - 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, - 0x65, 0x52, 0x0a, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x4a, 0x04, 0x08, - 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0xd7, 0x02, 0x0a, 
0x05, 0x4f, 0x72, - 0x64, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, - 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, - 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x12, 0x2c, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x28, 0x0a, - 0x0f, 0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f, - 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, 0x76, 0x32, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, - 0x06, 0x10, 0x07, 0x22, 0x58, 0x0a, 0x08, 0x43, 0x52, 0x4c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, - 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x42, 0x2b, 0x5a, - 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, - 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, - 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_core_proto_rawDescOnce sync.Once - file_core_proto_rawDescData = file_core_proto_rawDesc -) - -func file_core_proto_rawDescGZIP() []byte { - file_core_proto_rawDescOnce.Do(func() { - file_core_proto_rawDescData = protoimpl.X.CompressGZIP(file_core_proto_rawDescData) - }) - return file_core_proto_rawDescData -} - -var file_core_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_core_proto_goTypes = []interface{}{ - (*Challenge)(nil), // 0: core.Challenge - (*ValidationRecord)(nil), // 1: core.ValidationRecord - (*ProblemDetails)(nil), // 2: 
core.ProblemDetails - (*Certificate)(nil), // 3: core.Certificate - (*CertificateStatus)(nil), // 4: core.CertificateStatus - (*Registration)(nil), // 5: core.Registration - (*Authorization)(nil), // 6: core.Authorization - (*Order)(nil), // 7: core.Order - (*CRLEntry)(nil), // 8: core.CRLEntry -} -var file_core_proto_depIdxs = []int32{ - 1, // 0: core.Challenge.validationrecords:type_name -> core.ValidationRecord - 2, // 1: core.Challenge.error:type_name -> core.ProblemDetails - 0, // 2: core.Authorization.challenges:type_name -> core.Challenge - 2, // 3: core.Order.error:type_name -> core.ProblemDetails - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_core_proto_init() } -func file_core_proto_init() { - if File_core_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_core_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Challenge); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidationRecord); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProblemDetails); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Certificate); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CertificateStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Registration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Authorization); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Order); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CRLEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_core_proto_rawDesc, - NumEnums: 0, - NumMessages: 9, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_core_proto_goTypes, - DependencyIndexes: 
file_core_proto_depIdxs, - MessageInfos: file_core_proto_msgTypes, - }.Build() - File_core_proto = out.File - file_core_proto_rawDesc = nil - file_core_proto_goTypes = nil - file_core_proto_depIdxs = nil -} diff --git a/vendor/github.com/letsencrypt/boulder/core/proto/core.proto b/vendor/github.com/letsencrypt/boulder/core/proto/core.proto deleted file mode 100644 index 946bb16c875..00000000000 --- a/vendor/github.com/letsencrypt/boulder/core/proto/core.proto +++ /dev/null @@ -1,101 +0,0 @@ -syntax = "proto3"; - -package core; -option go_package = "github.com/letsencrypt/boulder/core/proto"; - -message Challenge { - int64 id = 1; - string type = 2; - string status = 6; - string uri = 9; - string token = 3; - string keyAuthorization = 5; - repeated ValidationRecord validationrecords = 10; - ProblemDetails error = 7; - int64 validated = 11; -} - -message ValidationRecord { - string hostname = 1; - string port = 2; - repeated bytes addressesResolved = 3; // net.IP.MarshalText() - bytes addressUsed = 4; // net.IP.MarshalText() - - repeated string authorities = 5; - string url = 6; - // A list of addresses tried before the address used (see - // core/objects.go and the comment on the ValidationRecord structure - // definition for more information. - repeated bytes addressesTried = 7; // net.IP.MarshalText() -} - -message ProblemDetails { - string problemType = 1; - string detail = 2; - int32 httpStatus = 3; -} - -message Certificate { - int64 registrationID = 1; - string serial = 2; - string digest = 3; - bytes der = 4; - int64 issued = 5; // Unix timestamp (nanoseconds) - int64 expires = 6; // Unix timestamp (nanoseconds) -} - -message CertificateStatus { - string serial = 1; - reserved 2; // previously subscriberApproved - string status = 3; - int64 ocspLastUpdated = 4; - int64 revokedDate = 5; - int64 revokedReason = 6; - int64 lastExpirationNagSent = 7; - bytes ocspResponse = 8; - int64 notAfter = 9; - bool isExpired = 10; - int64 issuerID = 11; -} - -message Registration { - int64 id = 1; - bytes key = 2; - repeated string contact = 3; - bool contactsPresent = 4; - string agreement = 5; - bytes initialIP = 6; - int64 createdAt = 7; // Unix timestamp (nanoseconds) - string status = 8; -} - -message Authorization { - string id = 1; - string identifier = 2; - int64 registrationID = 3; - string status = 4; - int64 expires = 5; // Unix timestamp (nanoseconds) - repeated core.Challenge challenges = 6; - reserved 7; // previously combinations - reserved 8; // previously v2 -} - -message Order { - int64 id = 1; - int64 registrationID = 2; - int64 expires = 3; - ProblemDetails error = 4; - string certificateSerial = 5; - reserved 6; // previously authorizations, deprecated in favor of v2Authorizations - string status = 7; - repeated string names = 8; - bool beganProcessing = 9; - int64 created = 10; - repeated int64 v2Authorizations = 11; -} - -message CRLEntry { - string serial = 1; - int32 reason = 2; - int64 revokedAt = 3; // Unix timestamp (nanoseconds) -} diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go index b9ac7047aa1..d7fe0266895 100644 --- a/vendor/github.com/letsencrypt/boulder/core/util.go +++ b/vendor/github.com/letsencrypt/boulder/core/util.go @@ -1,9 +1,10 @@ package core import ( - "bytes" "crypto" + "crypto/ecdsa" "crypto/rand" + "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/base64" @@ -16,6 +17,7 @@ import ( "math/big" mrand "math/rand" "os" + "path" "reflect" "regexp" "sort" @@ -23,7 +25,7 @@ import ( 
"time" "unicode" - jose "gopkg.in/square/go-jose.v2" + "gopkg.in/go-jose/go-jose.v2" ) const Unspecified = "Unspecified" @@ -96,7 +98,7 @@ func KeyDigest(key crypto.PublicKey) (Sha256Digest, error) { switch t := key.(type) { case *jose.JSONWebKey: if t == nil { - return Sha256Digest{}, fmt.Errorf("Cannot compute digest of nil key") + return Sha256Digest{}, errors.New("cannot compute digest of nil key") } return KeyDigest(t.Key) case jose.JSONWebKey: @@ -132,21 +134,16 @@ func KeyDigestEquals(j, k crypto.PublicKey) bool { return digestJ == digestK } -// PublicKeysEqual determines whether two public keys have the same marshalled -// bytes as one another -func PublicKeysEqual(a, b interface{}) (bool, error) { - if a == nil || b == nil { - return false, errors.New("One or more nil arguments to PublicKeysEqual") - } - aBytes, err := x509.MarshalPKIXPublicKey(a) - if err != nil { - return false, err - } - bBytes, err := x509.MarshalPKIXPublicKey(b) - if err != nil { - return false, err +// PublicKeysEqual determines whether two public keys are identical. +func PublicKeysEqual(a, b crypto.PublicKey) (bool, error) { + switch ak := a.(type) { + case *rsa.PublicKey: + return ak.Equal(b), nil + case *ecdsa.PublicKey: + return ak.Equal(b), nil + default: + return false, fmt.Errorf("unsupported public key type %T", ak) } - return bytes.Equal(aBytes, bBytes), nil } // SerialToString converts a certificate serial number (big.Int) to a String @@ -160,7 +157,7 @@ func SerialToString(serial *big.Int) string { func StringToSerial(serial string) (*big.Int, error) { var serialNum big.Int if !ValidSerial(serial) { - return &serialNum, errors.New("Invalid serial number") + return &serialNum, fmt.Errorf("invalid serial number %q", serial) } _, err := fmt.Sscanf(serial, "%036x", &serialNum) return &serialNum, err @@ -245,6 +242,14 @@ func UniqueLowerNames(names []string) (unique []string) { return } +// HashNames returns a hash of the names requested. This is intended for use +// when interacting with the orderFqdnSets table and rate limiting. +func HashNames(names []string) []byte { + names = UniqueLowerNames(names) + hash := sha256.Sum256([]byte(strings.Join(names, ","))) + return hash[:] +} + // LoadCert loads a PEM certificate specified by filename or returns an error func LoadCert(filename string) (*x509.Certificate, error) { certPEM, err := os.ReadFile(filename) @@ -253,7 +258,7 @@ func LoadCert(filename string) (*x509.Certificate, error) { } block, _ := pem.Decode(certPEM) if block == nil { - return nil, fmt.Errorf("No data in cert PEM file %s", filename) + return nil, fmt.Errorf("no data in cert PEM file %q", filename) } cert, err := x509.ParseCertificate(block.Bytes) if err != nil { @@ -298,3 +303,7 @@ func IsASCII(str string) bool { } return true } + +func Command() string { + return path.Base(os.Args[0]) +} diff --git a/vendor/github.com/letsencrypt/boulder/errors/errors.go b/vendor/github.com/letsencrypt/boulder/errors/errors.go deleted file mode 100644 index 50be1087a09..00000000000 --- a/vendor/github.com/letsencrypt/boulder/errors/errors.go +++ /dev/null @@ -1,194 +0,0 @@ -// Package errors provides internal-facing error types for use in Boulder. Many -// of these are transformed directly into Problem Details documents by the WFE. -// Some, like NotFound, may be handled internally. We avoid using Problem -// Details documents as part of our internal error system to avoid layering -// confusions. -// -// These errors are specifically for use in errors that cross RPC boundaries. 
-// An error type that does not need to be passed through an RPC can use a plain -// Go type locally. Our gRPC code is aware of these error types and will -// serialize and deserialize them automatically. -package errors - -import ( - "fmt" - "time" - - "github.com/letsencrypt/boulder/identifier" -) - -// ErrorType provides a coarse category for BoulderErrors. -// Objects of type ErrorType should never be directly returned by other -// functions; instead use the methods below to create an appropriate -// BoulderError wrapping one of these types. -type ErrorType int - -// These numeric constants are used when sending berrors through gRPC. -const ( - // InternalServer is deprecated. Instead, pass a plain Go error. That will get - // turned into a probs.InternalServerError by the WFE. - InternalServer ErrorType = iota - _ - Malformed - Unauthorized - NotFound - RateLimit - RejectedIdentifier - InvalidEmail - ConnectionFailure - _ // Reserved, previously WrongAuthorizationState - CAA - MissingSCTs - Duplicate - OrderNotReady - DNS - BadPublicKey - BadCSR - AlreadyRevoked - BadRevocationReason -) - -func (ErrorType) Error() string { - return "urn:ietf:params:acme:error" -} - -// BoulderError represents internal Boulder errors -type BoulderError struct { - Type ErrorType - Detail string - SubErrors []SubBoulderError - - // RetryAfter the duration a client should wait before retrying the request - // which resulted in this error. - RetryAfter time.Duration -} - -// SubBoulderError represents sub-errors specific to an identifier that are -// related to a top-level internal Boulder error. -type SubBoulderError struct { - *BoulderError - Identifier identifier.ACMEIdentifier -} - -func (be *BoulderError) Error() string { - return be.Detail -} - -func (be *BoulderError) Unwrap() error { - return be.Type -} - -// WithSubErrors returns a new BoulderError instance created by adding the -// provided subErrs to the existing BoulderError. -func (be *BoulderError) WithSubErrors(subErrs []SubBoulderError) *BoulderError { - return &BoulderError{ - Type: be.Type, - Detail: be.Detail, - SubErrors: append(be.SubErrors, subErrs...), - RetryAfter: be.RetryAfter, - } -} - -// New is a convenience function for creating a new BoulderError -func New(errType ErrorType, msg string, args ...interface{}) error { - return &BoulderError{ - Type: errType, - Detail: fmt.Sprintf(msg, args...), - } -} - -func InternalServerError(msg string, args ...interface{}) error { - return New(InternalServer, msg, args...) -} - -func MalformedError(msg string, args ...interface{}) error { - return New(Malformed, msg, args...) -} - -func UnauthorizedError(msg string, args ...interface{}) error { - return New(Unauthorized, msg, args...) -} - -func NotFoundError(msg string, args ...interface{}) error { - return New(NotFound, msg, args...) 
-} - -func RateLimitError(retryAfter time.Duration, msg string, args ...interface{}) error { - return &BoulderError{ - Type: RateLimit, - Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/", args...), - RetryAfter: retryAfter, - } -} - -func DuplicateCertificateError(retryAfter time.Duration, msg string, args ...interface{}) error { - return &BoulderError{ - Type: RateLimit, - Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/duplicate-certificate-limit/", args...), - RetryAfter: retryAfter, - } -} - -func FailedValidationError(retryAfter time.Duration, msg string, args ...interface{}) error { - return &BoulderError{ - Type: RateLimit, - Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/failed-validation-limit/", args...), - RetryAfter: retryAfter, - } -} - -func RegistrationsPerIPError(retryAfter time.Duration, msg string, args ...interface{}) error { - return &BoulderError{ - Type: RateLimit, - Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/too-many-registrations-for-this-ip/", args...), - RetryAfter: retryAfter, - } -} - -func RejectedIdentifierError(msg string, args ...interface{}) error { - return New(RejectedIdentifier, msg, args...) -} - -func InvalidEmailError(msg string, args ...interface{}) error { - return New(InvalidEmail, msg, args...) -} - -func ConnectionFailureError(msg string, args ...interface{}) error { - return New(ConnectionFailure, msg, args...) -} - -func CAAError(msg string, args ...interface{}) error { - return New(CAA, msg, args...) -} - -func MissingSCTsError(msg string, args ...interface{}) error { - return New(MissingSCTs, msg, args...) -} - -func DuplicateError(msg string, args ...interface{}) error { - return New(Duplicate, msg, args...) -} - -func OrderNotReadyError(msg string, args ...interface{}) error { - return New(OrderNotReady, msg, args...) -} - -func DNSError(msg string, args ...interface{}) error { - return New(DNS, msg, args...) -} - -func BadPublicKeyError(msg string, args ...interface{}) error { - return New(BadPublicKey, msg, args...) -} - -func BadCSRError(msg string, args ...interface{}) error { - return New(BadCSR, msg, args...) -} - -func AlreadyRevokedError(msg string, args ...interface{}) error { - return New(AlreadyRevoked, msg, args...) -} - -func BadRevocationReasonError(reason int64) error { - return New(BadRevocationReason, "disallowed revocation reason: %d", reason) -} diff --git a/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go b/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go deleted file mode 100644 index 7eadc1f9bf6..00000000000 --- a/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go +++ /dev/null @@ -1,56 +0,0 @@ -// Code generated by "stringer -type=FeatureFlag"; DO NOT EDIT. - -package features - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[unused-0] - _ = x[PrecertificateRevocation-1] - _ = x[StripDefaultSchemePort-2] - _ = x[NonCFSSLSigner-3] - _ = x[StoreIssuerInfo-4] - _ = x[StreamlineOrderAndAuthzs-5] - _ = x[V1DisableNewValidations-6] - _ = x[ExpirationMailerDontLookTwice-7] - _ = x[OldTLSInbound-8] - _ = x[OldTLSOutbound-9] - _ = x[ROCSPStage1-10] - _ = x[ROCSPStage2-11] - _ = x[ROCSPStage3-12] - _ = x[CAAValidationMethods-13] - _ = x[CAAAccountURI-14] - _ = x[EnforceMultiVA-15] - _ = x[MultiVAFullResults-16] - _ = x[MandatoryPOSTAsGET-17] - _ = x[AllowV1Registration-18] - _ = x[StoreRevokerInfo-19] - _ = x[RestrictRSAKeySizes-20] - _ = x[FasterNewOrdersRateLimit-21] - _ = x[ECDSAForAll-22] - _ = x[ServeRenewalInfo-23] - _ = x[GetAuthzReadOnly-24] - _ = x[GetAuthzUseIndex-25] - _ = x[CheckFailedAuthorizationsFirst-26] - _ = x[AllowReRevocation-27] - _ = x[MozRevocationReasons-28] - _ = x[SHA1CSRs-29] - _ = x[AllowUnrecognizedFeatures-30] - _ = x[RejectDuplicateCSRExtensions-31] - _ = x[ROCSPStage6-32] - _ = x[ROCSPStage7-33] -} - -const _FeatureFlag_name = "unusedPrecertificateRevocationStripDefaultSchemePortNonCFSSLSignerStoreIssuerInfoStreamlineOrderAndAuthzsV1DisableNewValidationsExpirationMailerDontLookTwiceOldTLSInboundOldTLSOutboundROCSPStage1ROCSPStage2ROCSPStage3CAAValidationMethodsCAAAccountURIEnforceMultiVAMultiVAFullResultsMandatoryPOSTAsGETAllowV1RegistrationStoreRevokerInfoRestrictRSAKeySizesFasterNewOrdersRateLimitECDSAForAllServeRenewalInfoGetAuthzReadOnlyGetAuthzUseIndexCheckFailedAuthorizationsFirstAllowReRevocationMozRevocationReasonsSHA1CSRsAllowUnrecognizedFeaturesRejectDuplicateCSRExtensionsROCSPStage6ROCSPStage7" - -var _FeatureFlag_index = [...]uint16{0, 6, 30, 52, 66, 81, 105, 128, 157, 170, 184, 195, 206, 217, 237, 250, 264, 282, 300, 319, 335, 354, 378, 389, 405, 421, 437, 467, 484, 504, 512, 537, 565, 576, 587} - -func (i FeatureFlag) String() string { - if i < 0 || i >= FeatureFlag(len(_FeatureFlag_index)-1) { - return "FeatureFlag(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _FeatureFlag_name[_FeatureFlag_index[i]:_FeatureFlag_index[i+1]] -} diff --git a/vendor/github.com/letsencrypt/boulder/features/features.go b/vendor/github.com/letsencrypt/boulder/features/features.go deleted file mode 100644 index 6db20d7deac..00000000000 --- a/vendor/github.com/letsencrypt/boulder/features/features.go +++ /dev/null @@ -1,203 +0,0 @@ -//go:generate stringer -type=FeatureFlag - -package features - -import ( - "fmt" - "strings" - "sync" -) - -type FeatureFlag int - -const ( - unused FeatureFlag = iota // unused is used for testing - // Deprecated features, these can be removed once stripped from production configs - PrecertificateRevocation - StripDefaultSchemePort - NonCFSSLSigner - StoreIssuerInfo - StreamlineOrderAndAuthzs - V1DisableNewValidations - ExpirationMailerDontLookTwice - OldTLSInbound - OldTLSOutbound - ROCSPStage1 - ROCSPStage2 - ROCSPStage3 - - // Currently in-use features - // Check CAA and respect validationmethods parameter. - CAAValidationMethods - // Check CAA and respect accounturi parameter. - CAAAccountURI - // EnforceMultiVA causes the VA to block on remote VA PerformValidation - // requests in order to make a valid/invalid decision with the results. - EnforceMultiVA - // MultiVAFullResults will cause the main VA to wait for all of the remote VA - // results, not just the threshold required to make a decision. - MultiVAFullResults - // MandatoryPOSTAsGET forbids legacy unauthenticated GET requests for ACME - // resources. 
- MandatoryPOSTAsGET - // Allow creation of new registrations in ACMEv1. - AllowV1Registration - // StoreRevokerInfo enables storage of the revoker and a bool indicating if the row - // was checked for extant unrevoked certificates in the blockedKeys table. - StoreRevokerInfo - // RestrictRSAKeySizes enables restriction of acceptable RSA public key moduli to - // the common sizes (2048, 3072, and 4096 bits). - RestrictRSAKeySizes - // FasterNewOrdersRateLimit enables use of a separate table for counting the - // new orders rate limit. - FasterNewOrdersRateLimit - // ECDSAForAll enables all accounts, regardless of their presence in the CA's - // ecdsaAllowedAccounts config value, to get issuance from ECDSA issuers. - ECDSAForAll - // ServeRenewalInfo exposes the renewalInfo endpoint in the directory and for - // GET requests. WARNING: This feature is a draft and highly unstable. - ServeRenewalInfo - // GetAuthzReadOnly causes the SA to use its read-only database connection - // (which is generally pointed at a replica rather than the primary db) when - // querying the authz2 table. - GetAuthzReadOnly - // GetAuthzUseIndex causes the SA to use to add a USE INDEX hint when it - // queries the authz2 table. - GetAuthzUseIndex - // Check the failed authorization limit before doing authz reuse. - CheckFailedAuthorizationsFirst - // AllowReRevocation causes the RA to allow the revocation reason of an - // already-revoked certificate to be updated to `keyCompromise` from any - // other reason if that compromise is demonstrated by making the second - // revocation request signed by the certificate keypair. - AllowReRevocation - // MozRevocationReasons causes the RA to enforce the following upcoming - // Mozilla policies regarding revocation: - // - A subscriber can request that their certificate be revoked with reason - // keyCompromise, even without demonstrating that compromise at the time. - // However, the cert's pubkey will not be added to the blocked keys list. - // - When an applicant other than the original subscriber requests that a - // certificate be revoked (by demonstrating control over all names in it), - // the cert will be revoked with reason cessationOfOperation, regardless of - // what revocation reason they request. - // - When anyone requests that a certificate be revoked by signing the request - // with the certificate's keypair, the cert will be revoked with reason - // keyCompromise, regardless of what revocation reason they request. - MozRevocationReasons - // SHA1CSRs controls whether the /acme/finalize endpoint rejects CSRs that - // are self-signed using SHA1. - SHA1CSRs - // AllowUnrecognizedFeatures is internal to the features package: if true, - // skip error when unrecognized feature flag names are passed. - AllowUnrecognizedFeatures - // RejectDuplicateCSRExtensions enables verification that submitted CSRs do - // not contain duplicate extensions. This behavior will be on by default in - // go1.19. - RejectDuplicateCSRExtensions - - // ROCSPStage6 disables writing full OCSP Responses to MariaDB during - // (pre)certificate issuance and during revocation. Because Stage 4 involved - // disabling ocsp-updater, this means that no ocsp response bytes will be - // written to the database anymore. - ROCSPStage6 - // ROCSPStage7 disables generating OCSP responses during issuance and - // revocation. This affects codepaths in both the RA (revocation) and the CA - // (precert "birth certificates"). 
- ROCSPStage7 -) - -// List of features and their default value, protected by fMu -var features = map[FeatureFlag]bool{ - unused: false, - CAAValidationMethods: false, - CAAAccountURI: false, - EnforceMultiVA: false, - MultiVAFullResults: false, - MandatoryPOSTAsGET: false, - AllowV1Registration: true, - V1DisableNewValidations: false, - PrecertificateRevocation: false, - StripDefaultSchemePort: false, - StoreIssuerInfo: false, - StoreRevokerInfo: false, - RestrictRSAKeySizes: false, - FasterNewOrdersRateLimit: false, - NonCFSSLSigner: false, - ECDSAForAll: false, - StreamlineOrderAndAuthzs: false, - ServeRenewalInfo: false, - GetAuthzReadOnly: false, - GetAuthzUseIndex: false, - CheckFailedAuthorizationsFirst: false, - AllowReRevocation: false, - MozRevocationReasons: false, - OldTLSOutbound: true, - OldTLSInbound: true, - SHA1CSRs: true, - AllowUnrecognizedFeatures: false, - ExpirationMailerDontLookTwice: false, - RejectDuplicateCSRExtensions: false, - ROCSPStage1: false, - ROCSPStage2: false, - ROCSPStage3: false, - ROCSPStage6: false, - ROCSPStage7: false, -} - -var fMu = new(sync.RWMutex) - -var initial = map[FeatureFlag]bool{} - -var nameToFeature = make(map[string]FeatureFlag, len(features)) - -func init() { - for f, v := range features { - nameToFeature[f.String()] = f - initial[f] = v - } -} - -// Set accepts a list of features and whether they should -// be enabled or disabled. In the presence of unrecognized -// flags, it will return an error or not depending on the -// value of AllowUnrecognizedFeatures. -func Set(featureSet map[string]bool) error { - fMu.Lock() - defer fMu.Unlock() - var unknown []string - for n, v := range featureSet { - f, present := nameToFeature[n] - if present { - features[f] = v - } else { - unknown = append(unknown, n) - } - } - if len(unknown) > 0 && !features[AllowUnrecognizedFeatures] { - return fmt.Errorf("unrecognized feature flag names: %s", - strings.Join(unknown, ", ")) - } - return nil -} - -// Enabled returns true if the feature is enabled or false -// if it isn't, it will panic if passed a feature that it -// doesn't know. 
-func Enabled(n FeatureFlag) bool { - fMu.RLock() - defer fMu.RUnlock() - v, present := features[n] - if !present { - panic(fmt.Sprintf("feature '%s' doesn't exist", n.String())) - } - return v -} - -// Reset resets the features to their initial state -func Reset() { - fMu.Lock() - defer fMu.Unlock() - for k, v := range initial { - features[k] = v - } -} diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go index fdcfe9a181e..198c09db4ed 100644 --- a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go +++ b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go @@ -9,8 +9,7 @@ import ( "os" "github.com/letsencrypt/boulder/core" - - yaml "gopkg.in/yaml.v3" + "github.com/letsencrypt/boulder/strictyaml" ) // blockedKeys is a type for maintaining a map of SHA256 hashes @@ -58,7 +57,7 @@ func loadBlockedKeysList(filename string) (*blockedKeys, error) { BlockedHashes []string `yaml:"blocked"` BlockedHashesHex []string `yaml:"blockedHashesHex"` } - err = yaml.Unmarshal(yamlBytes, &list) + err = strictyaml.Unmarshal(yamlBytes, &list) if err != nil { return nil, err } diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go index b751c376cd1..087a0181232 100644 --- a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go +++ b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go @@ -12,10 +12,6 @@ import ( "sync" "github.com/letsencrypt/boulder/core" - berrors "github.com/letsencrypt/boulder/errors" - "github.com/letsencrypt/boulder/features" - sapb "github.com/letsencrypt/boulder/sa/proto" - "google.golang.org/grpc" "github.com/titanous/rocacheck" ) @@ -68,10 +64,12 @@ func badKey(msg string, args ...interface{}) error { return fmt.Errorf("%w%s", ErrBadKey, fmt.Errorf(msg, args...)) } -// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey method to KeyPolicy, -// rather than storing a full sa.SQLStorageAuthority. This makes testing +// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey functionality to KeyPolicy, +// rather than storing a full sa.SQLStorageAuthority. This allows external +// users who don’t want to import all of boulder/sa, and makes testing // significantly simpler. -type BlockedKeyCheckFunc func(context.Context, *sapb.KeyBlockedRequest, ...grpc.CallOption) (*sapb.Exists, error) +// On success, the function returns a boolean which is true if the key is blocked. +type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error) // KeyPolicy determines which types of key may be used with various boulder // operations. @@ -82,7 +80,7 @@ type KeyPolicy struct { weakRSAList *WeakRSAKeys blockedList *blockedKeys fermatRounds int - dbCheck BlockedKeyCheckFunc + blockedCheck BlockedKeyCheckFunc } // NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384. @@ -97,7 +95,7 @@ func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) { AllowRSA: true, AllowECDSANISTP256: true, AllowECDSANISTP384: true, - dbCheck: bkc, + blockedCheck: bkc, } if config.WeakKeyFile != "" { keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile) @@ -137,20 +135,20 @@ func (policy *KeyPolicy) GoodKey(ctx context.Context, key crypto.PublicKey) erro // that has been administratively blocked. 
if policy.blockedList != nil { if blocked, err := policy.blockedList.blocked(key); err != nil { - return berrors.InternalServerError("error checking blocklist for key: %v", key) + return fmt.Errorf("error checking blocklist for key: %v", key) } else if blocked { return badKey("public key is forbidden") } } - if policy.dbCheck != nil { + if policy.blockedCheck != nil { digest, err := core.KeyDigest(key) if err != nil { return badKey("%w", err) } - exists, err := policy.dbCheck(ctx, &sapb.KeyBlockedRequest{KeyHash: digest[:]}) + exists, err := policy.blockedCheck(ctx, digest[:]) if err != nil { return err - } else if exists.Exists { + } else if exists { return badKey("public key is forbidden") } } @@ -275,6 +273,12 @@ func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) { } } +// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple +// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes +// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which +// have a known method to easily compute their private key, such as Debian Weak +// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at +// common key sizes, so we restrict all issuance to those common key sizes. var acceptableRSAKeySizes = map[int]bool{ 2048: true, 3072: true, @@ -290,27 +294,12 @@ func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) { return badKey("key is on a known weak RSA key list") } - // Baseline Requirements Appendix A - // Modulus must be >= 2048 bits and <= 4096 bits modulus := key.N + + // See comment on acceptableRSAKeySizes above. modulusBitLen := modulus.BitLen() - if features.Enabled(features.RestrictRSAKeySizes) { - if !acceptableRSAKeySizes[modulusBitLen] { - return badKey("key size not supported: %d", modulusBitLen) - } - } else { - const maxKeySize = 4096 - if modulusBitLen < 2048 { - return badKey("key too small: %d", modulusBitLen) - } - if modulusBitLen > maxKeySize { - return badKey("key too large: %d > %d", modulusBitLen, maxKeySize) - } - // Bit lengths that are not a multiple of 8 may cause problems on some - // client implementations. - if modulusBitLen%8 != 0 { - return badKey("key length wasn't a multiple of 8: %d", modulusBitLen) - } + if !acceptableRSAKeySizes[modulusBitLen] { + return badKey("key size not supported: %d", modulusBitLen) } // Rather than support arbitrary exponents, which significantly increases diff --git a/vendor/github.com/letsencrypt/boulder/probs/probs.go b/vendor/github.com/letsencrypt/boulder/probs/probs.go index 3736e8d391e..2cc766237dc 100644 --- a/vendor/github.com/letsencrypt/boulder/probs/probs.go +++ b/vendor/github.com/letsencrypt/boulder/probs/probs.go @@ -7,29 +7,33 @@ import ( "github.com/letsencrypt/boulder/identifier" ) -// Error types that can be used in ACME payloads const ( + // Error types that can be used in ACME payloads. These are sorted in the + // same order as they are defined in RFC8555 Section 6.7. We do not implement + // the `compound`, `externalAccountRequired`, or `userActionRequired` errors, + // because we have no path that would return them. 
+ AccountDoesNotExistProblem = ProblemType("accountDoesNotExist") + AlreadyRevokedProblem = ProblemType("alreadyRevoked") + BadCSRProblem = ProblemType("badCSR") + BadNonceProblem = ProblemType("badNonce") + BadPublicKeyProblem = ProblemType("badPublicKey") + BadRevocationReasonProblem = ProblemType("badRevocationReason") + BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm") + CAAProblem = ProblemType("caa") ConnectionProblem = ProblemType("connection") + DNSProblem = ProblemType("dns") + InvalidContactProblem = ProblemType("invalidContact") MalformedProblem = ProblemType("malformed") + OrderNotReadyProblem = ProblemType("orderNotReady") + RateLimitedProblem = ProblemType("rateLimited") + RejectedIdentifierProblem = ProblemType("rejectedIdentifier") ServerInternalProblem = ProblemType("serverInternal") TLSProblem = ProblemType("tls") UnauthorizedProblem = ProblemType("unauthorized") - RateLimitedProblem = ProblemType("rateLimited") - BadNonceProblem = ProblemType("badNonce") - InvalidEmailProblem = ProblemType("invalidEmail") - RejectedIdentifierProblem = ProblemType("rejectedIdentifier") - AccountDoesNotExistProblem = ProblemType("accountDoesNotExist") - CAAProblem = ProblemType("caa") - DNSProblem = ProblemType("dns") - AlreadyRevokedProblem = ProblemType("alreadyRevoked") - OrderNotReadyProblem = ProblemType("orderNotReady") - BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm") - BadPublicKeyProblem = ProblemType("badPublicKey") - BadRevocationReasonProblem = ProblemType("badRevocationReason") - BadCSRProblem = ProblemType("badCSR") + UnsupportedContactProblem = ProblemType("unsupportedContact") + UnsupportedIdentifierProblem = ProblemType("unsupportedIdentifier") - V1ErrorNS = "urn:acme:error:" - V2ErrorNS = "urn:ietf:params:acme:error:" + ErrorNS = "urn:ietf:params:acme:error:" ) // ProblemType defines the error types in the ACME protocol @@ -71,112 +75,71 @@ func (pd *ProblemDetails) WithSubProblems(subProbs []SubProblemDetails) *Problem } } -// statusTooManyRequests is the HTTP status code meant for rate limiting -// errors. It's not currently in the net/http library so we add it here. -const statusTooManyRequests = 429 +// Helper functions which construct the basic RFC8555 Problem Documents, with +// the Type already set and the Details supplied by the caller. -// ProblemDetailsToStatusCode inspects the given ProblemDetails to figure out -// what HTTP status code it should represent. It should only be used by the WFE -// but is included in this package because of its reliance on ProblemTypes. -func ProblemDetailsToStatusCode(prob *ProblemDetails) int { - if prob.HTTPStatus != 0 { - return prob.HTTPStatus - } - switch prob.Type { - case - ConnectionProblem, - MalformedProblem, - BadSignatureAlgorithmProblem, - BadPublicKeyProblem, - TLSProblem, - BadNonceProblem, - InvalidEmailProblem, - RejectedIdentifierProblem, - AccountDoesNotExistProblem, - BadRevocationReasonProblem: - return http.StatusBadRequest - case ServerInternalProblem: - return http.StatusInternalServerError - case - UnauthorizedProblem, - CAAProblem: - return http.StatusForbidden - case RateLimitedProblem: - return statusTooManyRequests - default: - return http.StatusInternalServerError - } -} - -// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad -// Request status code. 
-func BadNonce(detail string) *ProblemDetails { +// AccountDoesNotExist returns a ProblemDetails representing an +// AccountDoesNotExistProblem error +func AccountDoesNotExist(detail string) *ProblemDetails { return &ProblemDetails{ - Type: BadNonceProblem, + Type: AccountDoesNotExistProblem, Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad +// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad // Request status code. -func RejectedIdentifier(detail string) *ProblemDetails { +func AlreadyRevoked(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ - Type: RejectedIdentifierProblem, - Detail: detail, + Type: AlreadyRevokedProblem, + Detail: fmt.Sprintf(detail, a...), HTTPStatus: http.StatusBadRequest, } } -// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict -// status code. -func Conflict(detail string) *ProblemDetails { +// BadCSR returns a ProblemDetails representing a BadCSRProblem. +func BadCSR(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, - Detail: detail, - HTTPStatus: http.StatusConflict, + Type: BadCSRProblem, + Detail: fmt.Sprintf(detail, a...), + HTTPStatus: http.StatusBadRequest, } } -// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad +// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad // Request status code. -func AlreadyRevoked(detail string, a ...interface{}) *ProblemDetails { +func BadNonce(detail string) *ProblemDetails { return &ProblemDetails{ - Type: AlreadyRevokedProblem, - Detail: fmt.Sprintf(detail, a...), + Type: BadNonceProblem, + Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad +// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad // Request status code. -func Malformed(detail string, args ...interface{}) *ProblemDetails { - if len(args) > 0 { - detail = fmt.Sprintf(detail, args...) - } +func BadPublicKey(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, - Detail: detail, + Type: BadPublicKeyProblem, + Detail: fmt.Sprintf(detail, a...), HTTPStatus: http.StatusBadRequest, } } -// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request -// Timeout status code. -func Canceled(detail string, args ...interface{}) *ProblemDetails { - if len(args) > 0 { - detail = fmt.Sprintf(detail, args...) - } +// BadRevocationReason returns a ProblemDetails representing +// a BadRevocationReasonProblem +func BadRevocationReason(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, - Detail: detail, - HTTPStatus: http.StatusRequestTimeout, + Type: BadRevocationReasonProblem, + Detail: fmt.Sprintf(detail, a...), + HTTPStatus: http.StatusBadRequest, } } // BadSignatureAlgorithm returns a ProblemDetails with a BadSignatureAlgorithmProblem // and a 400 Bad Request status code. 
-func BadSignatureAlgorithm(detail string, a ...interface{}) *ProblemDetails { +func BadSignatureAlgorithm(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ Type: BadSignatureAlgorithmProblem, Detail: fmt.Sprintf(detail, a...), @@ -184,166 +147,195 @@ func BadSignatureAlgorithm(detail string, a ...interface{}) *ProblemDetails { } } -// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad -// Request status code. -func BadPublicKey(detail string, a ...interface{}) *ProblemDetails { +// CAA returns a ProblemDetails representing a CAAProblem +func CAA(detail string) *ProblemDetails { return &ProblemDetails{ - Type: BadPublicKeyProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, + Type: CAAProblem, + Detail: detail, + HTTPStatus: http.StatusForbidden, } } -// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found -// status code. -func NotFound(detail string) *ProblemDetails { +// Connection returns a ProblemDetails representing a ConnectionProblem +// error +func Connection(detail string) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, + Type: ConnectionProblem, Detail: detail, - HTTPStatus: http.StatusNotFound, + HTTPStatus: http.StatusBadRequest, } } -// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a -// 500 Internal Server Failure status code. -func ServerInternal(detail string) *ProblemDetails { +// DNS returns a ProblemDetails representing a DNSProblem +func DNS(detail string) *ProblemDetails { return &ProblemDetails{ - Type: ServerInternalProblem, + Type: DNSProblem, Detail: detail, - HTTPStatus: http.StatusInternalServerError, + HTTPStatus: http.StatusBadRequest, } } -// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403 -// Forbidden status code. -func Unauthorized(detail string) *ProblemDetails { +// InvalidContact returns a ProblemDetails representing an InvalidContactProblem. +func InvalidContact(detail string) *ProblemDetails { return &ProblemDetails{ - Type: UnauthorizedProblem, + Type: InvalidContactProblem, Detail: detail, - HTTPStatus: http.StatusForbidden, + HTTPStatus: http.StatusBadRequest, } } -// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP -// method error. -func MethodNotAllowed() *ProblemDetails { +// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad +// Request status code. +func Malformed(detail string, a ...any) *ProblemDetails { + if len(a) > 0 { + detail = fmt.Sprintf(detail, a...) 
+ } return &ProblemDetails{ Type: MalformedProblem, - Detail: "Method not allowed", - HTTPStatus: http.StatusMethodNotAllowed, + Detail: detail, + HTTPStatus: http.StatusBadRequest, } } -// ContentLengthRequired returns a ProblemDetails representing a missing -// Content-Length header error -func ContentLengthRequired() *ProblemDetails { +// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem +func OrderNotReady(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, - Detail: "missing Content-Length header", - HTTPStatus: http.StatusLengthRequired, + Type: OrderNotReadyProblem, + Detail: fmt.Sprintf(detail, a...), + HTTPStatus: http.StatusForbidden, } } -// InvalidContentType returns a ProblemDetails suitable for a missing -// ContentType header, or an incorrect ContentType header -func InvalidContentType(detail string) *ProblemDetails { +// RateLimited returns a ProblemDetails representing a RateLimitedProblem error +func RateLimited(detail string) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, + Type: RateLimitedProblem, Detail: detail, - HTTPStatus: http.StatusUnsupportedMediaType, + HTTPStatus: http.StatusTooManyRequests, } } -// InvalidEmail returns a ProblemDetails representing an invalid email address -// error -func InvalidEmail(detail string) *ProblemDetails { +// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad +// Request status code. +func RejectedIdentifier(detail string) *ProblemDetails { return &ProblemDetails{ - Type: InvalidEmailProblem, + Type: RejectedIdentifierProblem, Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// ConnectionFailure returns a ProblemDetails representing a ConnectionProblem -// error -func ConnectionFailure(detail string) *ProblemDetails { +// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a +// 500 Internal Server Failure status code. +func ServerInternal(detail string) *ProblemDetails { return &ProblemDetails{ - Type: ConnectionProblem, + Type: ServerInternalProblem, + Detail: detail, + HTTPStatus: http.StatusInternalServerError, + } +} + +// TLS returns a ProblemDetails representing a TLSProblem error +func TLS(detail string) *ProblemDetails { + return &ProblemDetails{ + Type: TLSProblem, Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// RateLimited returns a ProblemDetails representing a RateLimitedProblem error -func RateLimited(detail string) *ProblemDetails { +// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403 +// Forbidden status code. 
+func Unauthorized(detail string) *ProblemDetails { return &ProblemDetails{ - Type: RateLimitedProblem, + Type: UnauthorizedProblem, Detail: detail, - HTTPStatus: statusTooManyRequests, + HTTPStatus: http.StatusForbidden, } } -// TLSError returns a ProblemDetails representing a TLSProblem error -func TLSError(detail string) *ProblemDetails { +// UnsupportedContact returns a ProblemDetails representing an +// UnsupportedContactProblem +func UnsupportedContact(detail string) *ProblemDetails { return &ProblemDetails{ - Type: TLSProblem, + Type: UnsupportedContactProblem, Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// AccountDoesNotExist returns a ProblemDetails representing an -// AccountDoesNotExistProblem error -func AccountDoesNotExist(detail string) *ProblemDetails { +// UnsupportedIdentifier returns a ProblemDetails representing an +// UnsupportedIdentifierProblem +func UnsupportedIdentifier(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ - Type: AccountDoesNotExistProblem, - Detail: detail, + Type: UnsupportedIdentifierProblem, + Detail: fmt.Sprintf(detail, a...), HTTPStatus: http.StatusBadRequest, } } -// CAA returns a ProblemDetails representing a CAAProblem -func CAA(detail string) *ProblemDetails { +// Additional helper functions that return variations on MalformedProblem with +// different HTTP status codes set. + +// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request +// Timeout status code. +func Canceled(detail string, a ...any) *ProblemDetails { + if len(a) > 0 { + detail = fmt.Sprintf(detail, a...) + } return &ProblemDetails{ - Type: CAAProblem, + Type: MalformedProblem, Detail: detail, - HTTPStatus: http.StatusForbidden, + HTTPStatus: http.StatusRequestTimeout, } } -// DNS returns a ProblemDetails representing a DNSProblem -func DNS(detail string) *ProblemDetails { +// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict +// status code. +func Conflict(detail string) *ProblemDetails { return &ProblemDetails{ - Type: DNSProblem, + Type: MalformedProblem, Detail: detail, - HTTPStatus: http.StatusBadRequest, + HTTPStatus: http.StatusConflict, } } -// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem -func OrderNotReady(detail string, a ...interface{}) *ProblemDetails { +// ContentLengthRequired returns a ProblemDetails representing a missing +// Content-Length header error +func ContentLengthRequired() *ProblemDetails { return &ProblemDetails{ - Type: OrderNotReadyProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusForbidden, + Type: MalformedProblem, + Detail: "missing Content-Length header", + HTTPStatus: http.StatusLengthRequired, } } -// BadRevocationReason returns a ProblemDetails representing -// a BadRevocationReasonProblem -func BadRevocationReason(detail string, a ...interface{}) *ProblemDetails { +// InvalidContentType returns a ProblemDetails suitable for a missing +// ContentType header, or an incorrect ContentType header +func InvalidContentType(detail string) *ProblemDetails { return &ProblemDetails{ - Type: BadRevocationReasonProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, + Type: MalformedProblem, + Detail: detail, + HTTPStatus: http.StatusUnsupportedMediaType, } } -// BadCSR returns a ProblemDetails representing a BadCSRProblem. -func BadCSR(detail string, a ...interface{}) *ProblemDetails { +// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP +// method error. 
+func MethodNotAllowed() *ProblemDetails { return &ProblemDetails{ - Type: BadCSRProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, + Type: MalformedProblem, + Detail: "Method not allowed", + HTTPStatus: http.StatusMethodNotAllowed, + } +} + +// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found +// status code. +func NotFound(detail string) *ProblemDetails { + return &ProblemDetails{ + Type: MalformedProblem, + Detail: detail, + HTTPStatus: http.StatusNotFound, } } diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go deleted file mode 100644 index 27c5d18b077..00000000000 --- a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go +++ /dev/null @@ -1,4261 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.20.1 -// source: sa.proto - -package proto - -import ( - proto "github.com/letsencrypt/boulder/core/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type RegistrationID struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` -} - -func (x *RegistrationID) Reset() { - *x = RegistrationID{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RegistrationID) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RegistrationID) ProtoMessage() {} - -func (x *RegistrationID) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RegistrationID.ProtoReflect.Descriptor instead. 
-func (*RegistrationID) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{0} -} - -func (x *RegistrationID) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -type JSONWebKey struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Jwk []byte `protobuf:"bytes,1,opt,name=jwk,proto3" json:"jwk,omitempty"` -} - -func (x *JSONWebKey) Reset() { - *x = JSONWebKey{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *JSONWebKey) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*JSONWebKey) ProtoMessage() {} - -func (x *JSONWebKey) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use JSONWebKey.ProtoReflect.Descriptor instead. -func (*JSONWebKey) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{1} -} - -func (x *JSONWebKey) GetJwk() []byte { - if x != nil { - return x.Jwk - } - return nil -} - -type AuthorizationID struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` -} - -func (x *AuthorizationID) Reset() { - *x = AuthorizationID{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuthorizationID) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuthorizationID) ProtoMessage() {} - -func (x *AuthorizationID) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuthorizationID.ProtoReflect.Descriptor instead. 
-func (*AuthorizationID) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{2} -} - -func (x *AuthorizationID) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -type GetPendingAuthorizationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - IdentifierType string `protobuf:"bytes,2,opt,name=identifierType,proto3" json:"identifierType,omitempty"` - IdentifierValue string `protobuf:"bytes,3,opt,name=identifierValue,proto3" json:"identifierValue,omitempty"` - // Result must be valid until at least this Unix timestamp (nanos) - ValidUntil int64 `protobuf:"varint,4,opt,name=validUntil,proto3" json:"validUntil,omitempty"` -} - -func (x *GetPendingAuthorizationRequest) Reset() { - *x = GetPendingAuthorizationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetPendingAuthorizationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetPendingAuthorizationRequest) ProtoMessage() {} - -func (x *GetPendingAuthorizationRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetPendingAuthorizationRequest.ProtoReflect.Descriptor instead. -func (*GetPendingAuthorizationRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{3} -} - -func (x *GetPendingAuthorizationRequest) GetRegistrationID() int64 { - if x != nil { - return x.RegistrationID - } - return 0 -} - -func (x *GetPendingAuthorizationRequest) GetIdentifierType() string { - if x != nil { - return x.IdentifierType - } - return "" -} - -func (x *GetPendingAuthorizationRequest) GetIdentifierValue() string { - if x != nil { - return x.IdentifierValue - } - return "" -} - -func (x *GetPendingAuthorizationRequest) GetValidUntil() int64 { - if x != nil { - return x.ValidUntil - } - return 0 -} - -type GetValidAuthorizationsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"` - Now int64 `protobuf:"varint,3,opt,name=now,proto3" json:"now,omitempty"` // Unix timestamp (nanoseconds) -} - -func (x *GetValidAuthorizationsRequest) Reset() { - *x = GetValidAuthorizationsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetValidAuthorizationsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetValidAuthorizationsRequest) ProtoMessage() {} - -func (x *GetValidAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use 
GetValidAuthorizationsRequest.ProtoReflect.Descriptor instead. -func (*GetValidAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{4} -} - -func (x *GetValidAuthorizationsRequest) GetRegistrationID() int64 { - if x != nil { - return x.RegistrationID - } - return 0 -} - -func (x *GetValidAuthorizationsRequest) GetDomains() []string { - if x != nil { - return x.Domains - } - return nil -} - -func (x *GetValidAuthorizationsRequest) GetNow() int64 { - if x != nil { - return x.Now - } - return 0 -} - -type ValidAuthorizations struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Valid []*ValidAuthorizations_MapElement `protobuf:"bytes,1,rep,name=valid,proto3" json:"valid,omitempty"` -} - -func (x *ValidAuthorizations) Reset() { - *x = ValidAuthorizations{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidAuthorizations) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidAuthorizations) ProtoMessage() {} - -func (x *ValidAuthorizations) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidAuthorizations.ProtoReflect.Descriptor instead. -func (*ValidAuthorizations) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{5} -} - -func (x *ValidAuthorizations) GetValid() []*ValidAuthorizations_MapElement { - if x != nil { - return x.Valid - } - return nil -} - -type Serial struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` -} - -func (x *Serial) Reset() { - *x = Serial{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Serial) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Serial) ProtoMessage() {} - -func (x *Serial) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Serial.ProtoReflect.Descriptor instead. 
-func (*Serial) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{6} -} - -func (x *Serial) GetSerial() string { - if x != nil { - return x.Serial - } - return "" -} - -type SerialMetadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` - RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Created int64 `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` // Unix timestamp (nanoseconds) - Expires int64 `protobuf:"varint,4,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds) -} - -func (x *SerialMetadata) Reset() { - *x = SerialMetadata{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SerialMetadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SerialMetadata) ProtoMessage() {} - -func (x *SerialMetadata) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SerialMetadata.ProtoReflect.Descriptor instead. -func (*SerialMetadata) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{7} -} - -func (x *SerialMetadata) GetSerial() string { - if x != nil { - return x.Serial - } - return "" -} - -func (x *SerialMetadata) GetRegistrationID() int64 { - if x != nil { - return x.RegistrationID - } - return 0 -} - -func (x *SerialMetadata) GetCreated() int64 { - if x != nil { - return x.Created - } - return 0 -} - -func (x *SerialMetadata) GetExpires() int64 { - if x != nil { - return x.Expires - } - return 0 -} - -type Range struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Earliest int64 `protobuf:"varint,1,opt,name=earliest,proto3" json:"earliest,omitempty"` // Unix timestamp (nanoseconds) - Latest int64 `protobuf:"varint,2,opt,name=latest,proto3" json:"latest,omitempty"` // Unix timestamp (nanoseconds) -} - -func (x *Range) Reset() { - *x = Range{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Range) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Range) ProtoMessage() {} - -func (x *Range) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Range.ProtoReflect.Descriptor instead. 
-func (*Range) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{8} -} - -func (x *Range) GetEarliest() int64 { - if x != nil { - return x.Earliest - } - return 0 -} - -func (x *Range) GetLatest() int64 { - if x != nil { - return x.Latest - } - return 0 -} - -type Count struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` -} - -func (x *Count) Reset() { - *x = Count{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Count) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Count) ProtoMessage() {} - -func (x *Count) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Count.ProtoReflect.Descriptor instead. -func (*Count) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{9} -} - -func (x *Count) GetCount() int64 { - if x != nil { - return x.Count - } - return 0 -} - -type Timestamps struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Timestamps []int64 `protobuf:"varint,1,rep,packed,name=timestamps,proto3" json:"timestamps,omitempty"` // Unix timestamp (nanoseconds) -} - -func (x *Timestamps) Reset() { - *x = Timestamps{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Timestamps) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Timestamps) ProtoMessage() {} - -func (x *Timestamps) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Timestamps.ProtoReflect.Descriptor instead. 
-func (*Timestamps) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{10} -} - -func (x *Timestamps) GetTimestamps() []int64 { - if x != nil { - return x.Timestamps - } - return nil -} - -type CountCertificatesByNamesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Range *Range `protobuf:"bytes,1,opt,name=range,proto3" json:"range,omitempty"` - Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"` -} - -func (x *CountCertificatesByNamesRequest) Reset() { - *x = CountCertificatesByNamesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CountCertificatesByNamesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CountCertificatesByNamesRequest) ProtoMessage() {} - -func (x *CountCertificatesByNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CountCertificatesByNamesRequest.ProtoReflect.Descriptor instead. -func (*CountCertificatesByNamesRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{11} -} - -func (x *CountCertificatesByNamesRequest) GetRange() *Range { - if x != nil { - return x.Range - } - return nil -} - -func (x *CountCertificatesByNamesRequest) GetNames() []string { - if x != nil { - return x.Names - } - return nil -} - -type CountByNames struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Counts map[string]int64 `protobuf:"bytes,1,rep,name=counts,proto3" json:"counts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - Earliest *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=earliest,proto3" json:"earliest,omitempty"` // Unix timestamp (nanoseconds) -} - -func (x *CountByNames) Reset() { - *x = CountByNames{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CountByNames) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CountByNames) ProtoMessage() {} - -func (x *CountByNames) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CountByNames.ProtoReflect.Descriptor instead. 
-func (*CountByNames) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{12} -} - -func (x *CountByNames) GetCounts() map[string]int64 { - if x != nil { - return x.Counts - } - return nil -} - -func (x *CountByNames) GetEarliest() *timestamppb.Timestamp { - if x != nil { - return x.Earliest - } - return nil -} - -type CountRegistrationsByIPRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Ip []byte `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` - Range *Range `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"` -} - -func (x *CountRegistrationsByIPRequest) Reset() { - *x = CountRegistrationsByIPRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CountRegistrationsByIPRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CountRegistrationsByIPRequest) ProtoMessage() {} - -func (x *CountRegistrationsByIPRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CountRegistrationsByIPRequest.ProtoReflect.Descriptor instead. -func (*CountRegistrationsByIPRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{13} -} - -func (x *CountRegistrationsByIPRequest) GetIp() []byte { - if x != nil { - return x.Ip - } - return nil -} - -func (x *CountRegistrationsByIPRequest) GetRange() *Range { - if x != nil { - return x.Range - } - return nil -} - -type CountInvalidAuthorizationsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"` - // Count authorizations that expire in this range. - Range *Range `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"` -} - -func (x *CountInvalidAuthorizationsRequest) Reset() { - *x = CountInvalidAuthorizationsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CountInvalidAuthorizationsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CountInvalidAuthorizationsRequest) ProtoMessage() {} - -func (x *CountInvalidAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CountInvalidAuthorizationsRequest.ProtoReflect.Descriptor instead. 
-func (*CountInvalidAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{14} -} - -func (x *CountInvalidAuthorizationsRequest) GetRegistrationID() int64 { - if x != nil { - return x.RegistrationID - } - return 0 -} - -func (x *CountInvalidAuthorizationsRequest) GetHostname() string { - if x != nil { - return x.Hostname - } - return "" -} - -func (x *CountInvalidAuthorizationsRequest) GetRange() *Range { - if x != nil { - return x.Range - } - return nil -} - -type CountOrdersRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AccountID int64 `protobuf:"varint,1,opt,name=accountID,proto3" json:"accountID,omitempty"` - Range *Range `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"` -} - -func (x *CountOrdersRequest) Reset() { - *x = CountOrdersRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CountOrdersRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CountOrdersRequest) ProtoMessage() {} - -func (x *CountOrdersRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CountOrdersRequest.ProtoReflect.Descriptor instead. -func (*CountOrdersRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{15} -} - -func (x *CountOrdersRequest) GetAccountID() int64 { - if x != nil { - return x.AccountID - } - return 0 -} - -func (x *CountOrdersRequest) GetRange() *Range { - if x != nil { - return x.Range - } - return nil -} - -type CountFQDNSetsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Window int64 `protobuf:"varint,1,opt,name=window,proto3" json:"window,omitempty"` - Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"` -} - -func (x *CountFQDNSetsRequest) Reset() { - *x = CountFQDNSetsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CountFQDNSetsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CountFQDNSetsRequest) ProtoMessage() {} - -func (x *CountFQDNSetsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CountFQDNSetsRequest.ProtoReflect.Descriptor instead. 
-func (*CountFQDNSetsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{16} -} - -func (x *CountFQDNSetsRequest) GetWindow() int64 { - if x != nil { - return x.Window - } - return 0 -} - -func (x *CountFQDNSetsRequest) GetDomains() []string { - if x != nil { - return x.Domains - } - return nil -} - -type FQDNSetExistsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Domains []string `protobuf:"bytes,1,rep,name=domains,proto3" json:"domains,omitempty"` -} - -func (x *FQDNSetExistsRequest) Reset() { - *x = FQDNSetExistsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FQDNSetExistsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FQDNSetExistsRequest) ProtoMessage() {} - -func (x *FQDNSetExistsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FQDNSetExistsRequest.ProtoReflect.Descriptor instead. -func (*FQDNSetExistsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{17} -} - -func (x *FQDNSetExistsRequest) GetDomains() []string { - if x != nil { - return x.Domains - } - return nil -} - -type PreviousCertificateExistsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` - RegID int64 `protobuf:"varint,2,opt,name=regID,proto3" json:"regID,omitempty"` -} - -func (x *PreviousCertificateExistsRequest) Reset() { - *x = PreviousCertificateExistsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PreviousCertificateExistsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PreviousCertificateExistsRequest) ProtoMessage() {} - -func (x *PreviousCertificateExistsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PreviousCertificateExistsRequest.ProtoReflect.Descriptor instead. 
-func (*PreviousCertificateExistsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{18} -} - -func (x *PreviousCertificateExistsRequest) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *PreviousCertificateExistsRequest) GetRegID() int64 { - if x != nil { - return x.RegID - } - return 0 -} - -type Exists struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"` -} - -func (x *Exists) Reset() { - *x = Exists{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Exists) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Exists) ProtoMessage() {} - -func (x *Exists) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Exists.ProtoReflect.Descriptor instead. -func (*Exists) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{19} -} - -func (x *Exists) GetExists() bool { - if x != nil { - return x.Exists - } - return false -} - -type AddSerialRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RegID int64 `protobuf:"varint,1,opt,name=regID,proto3" json:"regID,omitempty"` - Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"` - Created int64 `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` // Unix timestamp (nanoseconds) - Expires int64 `protobuf:"varint,4,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds) -} - -func (x *AddSerialRequest) Reset() { - *x = AddSerialRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddSerialRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddSerialRequest) ProtoMessage() {} - -func (x *AddSerialRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddSerialRequest.ProtoReflect.Descriptor instead. 
-func (*AddSerialRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{20} -} - -func (x *AddSerialRequest) GetRegID() int64 { - if x != nil { - return x.RegID - } - return 0 -} - -func (x *AddSerialRequest) GetSerial() string { - if x != nil { - return x.Serial - } - return "" -} - -func (x *AddSerialRequest) GetCreated() int64 { - if x != nil { - return x.Created - } - return 0 -} - -func (x *AddSerialRequest) GetExpires() int64 { - if x != nil { - return x.Expires - } - return 0 -} - -type AddCertificateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Der []byte `protobuf:"bytes,1,opt,name=der,proto3" json:"der,omitempty"` - RegID int64 `protobuf:"varint,2,opt,name=regID,proto3" json:"regID,omitempty"` - // A signed OCSP response for the certificate contained in "der". - // Note: The certificate status in the OCSP response is assumed to be 0 (good). - Ocsp []byte `protobuf:"bytes,3,opt,name=ocsp,proto3" json:"ocsp,omitempty"` - // An issued time. When not present the SA defaults to using - // the current time. The orphan-finder uses this parameter to add - // certificates with the correct historic issued date - Issued int64 `protobuf:"varint,4,opt,name=issued,proto3" json:"issued,omitempty"` - IssuerID int64 `protobuf:"varint,5,opt,name=issuerID,proto3" json:"issuerID,omitempty"` -} - -func (x *AddCertificateRequest) Reset() { - *x = AddCertificateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddCertificateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddCertificateRequest) ProtoMessage() {} - -func (x *AddCertificateRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddCertificateRequest.ProtoReflect.Descriptor instead. 
-func (*AddCertificateRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{21} -} - -func (x *AddCertificateRequest) GetDer() []byte { - if x != nil { - return x.Der - } - return nil -} - -func (x *AddCertificateRequest) GetRegID() int64 { - if x != nil { - return x.RegID - } - return 0 -} - -func (x *AddCertificateRequest) GetOcsp() []byte { - if x != nil { - return x.Ocsp - } - return nil -} - -func (x *AddCertificateRequest) GetIssued() int64 { - if x != nil { - return x.Issued - } - return 0 -} - -func (x *AddCertificateRequest) GetIssuerID() int64 { - if x != nil { - return x.IssuerID - } - return 0 -} - -type AddCertificateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` -} - -func (x *AddCertificateResponse) Reset() { - *x = AddCertificateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddCertificateResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddCertificateResponse) ProtoMessage() {} - -func (x *AddCertificateResponse) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddCertificateResponse.ProtoReflect.Descriptor instead. -func (*AddCertificateResponse) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{22} -} - -func (x *AddCertificateResponse) GetDigest() string { - if x != nil { - return x.Digest - } - return "" -} - -type OrderRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` -} - -func (x *OrderRequest) Reset() { - *x = OrderRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OrderRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OrderRequest) ProtoMessage() {} - -func (x *OrderRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OrderRequest.ProtoReflect.Descriptor instead. 
-func (*OrderRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{23} -} - -func (x *OrderRequest) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -type NewOrderRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Expires int64 `protobuf:"varint,2,opt,name=expires,proto3" json:"expires,omitempty"` - Names []string `protobuf:"bytes,3,rep,name=names,proto3" json:"names,omitempty"` - V2Authorizations []int64 `protobuf:"varint,4,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"` -} - -func (x *NewOrderRequest) Reset() { - *x = NewOrderRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NewOrderRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NewOrderRequest) ProtoMessage() {} - -func (x *NewOrderRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NewOrderRequest.ProtoReflect.Descriptor instead. -func (*NewOrderRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{24} -} - -func (x *NewOrderRequest) GetRegistrationID() int64 { - if x != nil { - return x.RegistrationID - } - return 0 -} - -func (x *NewOrderRequest) GetExpires() int64 { - if x != nil { - return x.Expires - } - return 0 -} - -func (x *NewOrderRequest) GetNames() []string { - if x != nil { - return x.Names - } - return nil -} - -func (x *NewOrderRequest) GetV2Authorizations() []int64 { - if x != nil { - return x.V2Authorizations - } - return nil -} - -type NewOrderAndAuthzsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NewOrder *NewOrderRequest `protobuf:"bytes,1,opt,name=newOrder,proto3" json:"newOrder,omitempty"` - NewAuthzs []*proto.Authorization `protobuf:"bytes,2,rep,name=newAuthzs,proto3" json:"newAuthzs,omitempty"` -} - -func (x *NewOrderAndAuthzsRequest) Reset() { - *x = NewOrderAndAuthzsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NewOrderAndAuthzsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NewOrderAndAuthzsRequest) ProtoMessage() {} - -func (x *NewOrderAndAuthzsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NewOrderAndAuthzsRequest.ProtoReflect.Descriptor instead. 
-func (*NewOrderAndAuthzsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{25} -} - -func (x *NewOrderAndAuthzsRequest) GetNewOrder() *NewOrderRequest { - if x != nil { - return x.NewOrder - } - return nil -} - -func (x *NewOrderAndAuthzsRequest) GetNewAuthzs() []*proto.Authorization { - if x != nil { - return x.NewAuthzs - } - return nil -} - -type SetOrderErrorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Error *proto.ProblemDetails `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` -} - -func (x *SetOrderErrorRequest) Reset() { - *x = SetOrderErrorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetOrderErrorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetOrderErrorRequest) ProtoMessage() {} - -func (x *SetOrderErrorRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetOrderErrorRequest.ProtoReflect.Descriptor instead. -func (*SetOrderErrorRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{26} -} - -func (x *SetOrderErrorRequest) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *SetOrderErrorRequest) GetError() *proto.ProblemDetails { - if x != nil { - return x.Error - } - return nil -} - -type GetValidOrderAuthorizationsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - AcctID int64 `protobuf:"varint,2,opt,name=acctID,proto3" json:"acctID,omitempty"` -} - -func (x *GetValidOrderAuthorizationsRequest) Reset() { - *x = GetValidOrderAuthorizationsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetValidOrderAuthorizationsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetValidOrderAuthorizationsRequest) ProtoMessage() {} - -func (x *GetValidOrderAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetValidOrderAuthorizationsRequest.ProtoReflect.Descriptor instead. 
-func (*GetValidOrderAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{27} -} - -func (x *GetValidOrderAuthorizationsRequest) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *GetValidOrderAuthorizationsRequest) GetAcctID() int64 { - if x != nil { - return x.AcctID - } - return 0 -} - -type GetOrderForNamesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AcctID int64 `protobuf:"varint,1,opt,name=acctID,proto3" json:"acctID,omitempty"` - Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"` -} - -func (x *GetOrderForNamesRequest) Reset() { - *x = GetOrderForNamesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetOrderForNamesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetOrderForNamesRequest) ProtoMessage() {} - -func (x *GetOrderForNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetOrderForNamesRequest.ProtoReflect.Descriptor instead. -func (*GetOrderForNamesRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{28} -} - -func (x *GetOrderForNamesRequest) GetAcctID() int64 { - if x != nil { - return x.AcctID - } - return 0 -} - -func (x *GetOrderForNamesRequest) GetNames() []string { - if x != nil { - return x.Names - } - return nil -} - -type FinalizeOrderRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - CertificateSerial string `protobuf:"bytes,2,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"` -} - -func (x *FinalizeOrderRequest) Reset() { - *x = FinalizeOrderRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FinalizeOrderRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FinalizeOrderRequest) ProtoMessage() {} - -func (x *FinalizeOrderRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FinalizeOrderRequest.ProtoReflect.Descriptor instead. 
-func (*FinalizeOrderRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{29} -} - -func (x *FinalizeOrderRequest) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *FinalizeOrderRequest) GetCertificateSerial() string { - if x != nil { - return x.CertificateSerial - } - return "" -} - -type GetAuthorizationsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"` - Now int64 `protobuf:"varint,3,opt,name=now,proto3" json:"now,omitempty"` // Unix timestamp (nanoseconds) -} - -func (x *GetAuthorizationsRequest) Reset() { - *x = GetAuthorizationsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAuthorizationsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAuthorizationsRequest) ProtoMessage() {} - -func (x *GetAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAuthorizationsRequest.ProtoReflect.Descriptor instead. -func (*GetAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{30} -} - -func (x *GetAuthorizationsRequest) GetRegistrationID() int64 { - if x != nil { - return x.RegistrationID - } - return 0 -} - -func (x *GetAuthorizationsRequest) GetDomains() []string { - if x != nil { - return x.Domains - } - return nil -} - -func (x *GetAuthorizationsRequest) GetNow() int64 { - if x != nil { - return x.Now - } - return 0 -} - -type Authorizations struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Authz []*Authorizations_MapElement `protobuf:"bytes,1,rep,name=authz,proto3" json:"authz,omitempty"` -} - -func (x *Authorizations) Reset() { - *x = Authorizations{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Authorizations) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Authorizations) ProtoMessage() {} - -func (x *Authorizations) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Authorizations.ProtoReflect.Descriptor instead. 
-func (*Authorizations) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{31} -} - -func (x *Authorizations) GetAuthz() []*Authorizations_MapElement { - if x != nil { - return x.Authz - } - return nil -} - -type AddPendingAuthorizationsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Authz []*proto.Authorization `protobuf:"bytes,1,rep,name=authz,proto3" json:"authz,omitempty"` -} - -func (x *AddPendingAuthorizationsRequest) Reset() { - *x = AddPendingAuthorizationsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddPendingAuthorizationsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddPendingAuthorizationsRequest) ProtoMessage() {} - -func (x *AddPendingAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddPendingAuthorizationsRequest.ProtoReflect.Descriptor instead. -func (*AddPendingAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{32} -} - -func (x *AddPendingAuthorizationsRequest) GetAuthz() []*proto.Authorization { - if x != nil { - return x.Authz - } - return nil -} - -type AuthorizationIDs struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` -} - -func (x *AuthorizationIDs) Reset() { - *x = AuthorizationIDs{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuthorizationIDs) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuthorizationIDs) ProtoMessage() {} - -func (x *AuthorizationIDs) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuthorizationIDs.ProtoReflect.Descriptor instead. 
-func (*AuthorizationIDs) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{33} -} - -func (x *AuthorizationIDs) GetIds() []string { - if x != nil { - return x.Ids - } - return nil -} - -type AuthorizationID2 struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` -} - -func (x *AuthorizationID2) Reset() { - *x = AuthorizationID2{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuthorizationID2) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuthorizationID2) ProtoMessage() {} - -func (x *AuthorizationID2) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuthorizationID2.ProtoReflect.Descriptor instead. -func (*AuthorizationID2) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{34} -} - -func (x *AuthorizationID2) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -type Authorization2IDs struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Ids []int64 `protobuf:"varint,1,rep,packed,name=ids,proto3" json:"ids,omitempty"` -} - -func (x *Authorization2IDs) Reset() { - *x = Authorization2IDs{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Authorization2IDs) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Authorization2IDs) ProtoMessage() {} - -func (x *Authorization2IDs) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Authorization2IDs.ProtoReflect.Descriptor instead. 
-func (*Authorization2IDs) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{35} -} - -func (x *Authorization2IDs) GetIds() []int64 { - if x != nil { - return x.Ids - } - return nil -} - -type RevokeCertificateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` - Reason int64 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"` - Date int64 `protobuf:"varint,3,opt,name=date,proto3" json:"date,omitempty"` // Unix timestamp (nanoseconds) - Backdate int64 `protobuf:"varint,5,opt,name=backdate,proto3" json:"backdate,omitempty"` // Unix timestamp (nanoseconds) - Response []byte `protobuf:"bytes,4,opt,name=response,proto3" json:"response,omitempty"` - IssuerID int64 `protobuf:"varint,6,opt,name=issuerID,proto3" json:"issuerID,omitempty"` -} - -func (x *RevokeCertificateRequest) Reset() { - *x = RevokeCertificateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RevokeCertificateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RevokeCertificateRequest) ProtoMessage() {} - -func (x *RevokeCertificateRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RevokeCertificateRequest.ProtoReflect.Descriptor instead. -func (*RevokeCertificateRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{36} -} - -func (x *RevokeCertificateRequest) GetSerial() string { - if x != nil { - return x.Serial - } - return "" -} - -func (x *RevokeCertificateRequest) GetReason() int64 { - if x != nil { - return x.Reason - } - return 0 -} - -func (x *RevokeCertificateRequest) GetDate() int64 { - if x != nil { - return x.Date - } - return 0 -} - -func (x *RevokeCertificateRequest) GetBackdate() int64 { - if x != nil { - return x.Backdate - } - return 0 -} - -func (x *RevokeCertificateRequest) GetResponse() []byte { - if x != nil { - return x.Response - } - return nil -} - -func (x *RevokeCertificateRequest) GetIssuerID() int64 { - if x != nil { - return x.IssuerID - } - return 0 -} - -type FinalizeAuthorizationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - Expires int64 `protobuf:"varint,3,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds) - Attempted string `protobuf:"bytes,4,opt,name=attempted,proto3" json:"attempted,omitempty"` - ValidationRecords []*proto.ValidationRecord `protobuf:"bytes,5,rep,name=validationRecords,proto3" json:"validationRecords,omitempty"` - ValidationError *proto.ProblemDetails `protobuf:"bytes,6,opt,name=validationError,proto3" json:"validationError,omitempty"` - AttemptedAt int64 `protobuf:"varint,7,opt,name=attemptedAt,proto3" json:"attemptedAt,omitempty"` // Unix timestamp (nanoseconds) -} - -func (x *FinalizeAuthorizationRequest) Reset() { - *x = FinalizeAuthorizationRequest{} - if 
protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FinalizeAuthorizationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FinalizeAuthorizationRequest) ProtoMessage() {} - -func (x *FinalizeAuthorizationRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FinalizeAuthorizationRequest.ProtoReflect.Descriptor instead. -func (*FinalizeAuthorizationRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{37} -} - -func (x *FinalizeAuthorizationRequest) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *FinalizeAuthorizationRequest) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *FinalizeAuthorizationRequest) GetExpires() int64 { - if x != nil { - return x.Expires - } - return 0 -} - -func (x *FinalizeAuthorizationRequest) GetAttempted() string { - if x != nil { - return x.Attempted - } - return "" -} - -func (x *FinalizeAuthorizationRequest) GetValidationRecords() []*proto.ValidationRecord { - if x != nil { - return x.ValidationRecords - } - return nil -} - -func (x *FinalizeAuthorizationRequest) GetValidationError() *proto.ProblemDetails { - if x != nil { - return x.ValidationError - } - return nil -} - -func (x *FinalizeAuthorizationRequest) GetAttemptedAt() int64 { - if x != nil { - return x.AttemptedAt - } - return 0 -} - -type AddBlockedKeyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - KeyHash []byte `protobuf:"bytes,1,opt,name=keyHash,proto3" json:"keyHash,omitempty"` - Added int64 `protobuf:"varint,2,opt,name=added,proto3" json:"added,omitempty"` // Unix timestamp (nanoseconds) - Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"` - Comment string `protobuf:"bytes,4,opt,name=comment,proto3" json:"comment,omitempty"` - RevokedBy int64 `protobuf:"varint,5,opt,name=revokedBy,proto3" json:"revokedBy,omitempty"` -} - -func (x *AddBlockedKeyRequest) Reset() { - *x = AddBlockedKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddBlockedKeyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddBlockedKeyRequest) ProtoMessage() {} - -func (x *AddBlockedKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddBlockedKeyRequest.ProtoReflect.Descriptor instead. 
-func (*AddBlockedKeyRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{38} -} - -func (x *AddBlockedKeyRequest) GetKeyHash() []byte { - if x != nil { - return x.KeyHash - } - return nil -} - -func (x *AddBlockedKeyRequest) GetAdded() int64 { - if x != nil { - return x.Added - } - return 0 -} - -func (x *AddBlockedKeyRequest) GetSource() string { - if x != nil { - return x.Source - } - return "" -} - -func (x *AddBlockedKeyRequest) GetComment() string { - if x != nil { - return x.Comment - } - return "" -} - -func (x *AddBlockedKeyRequest) GetRevokedBy() int64 { - if x != nil { - return x.RevokedBy - } - return 0 -} - -type KeyBlockedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - KeyHash []byte `protobuf:"bytes,1,opt,name=keyHash,proto3" json:"keyHash,omitempty"` -} - -func (x *KeyBlockedRequest) Reset() { - *x = KeyBlockedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *KeyBlockedRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*KeyBlockedRequest) ProtoMessage() {} - -func (x *KeyBlockedRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use KeyBlockedRequest.ProtoReflect.Descriptor instead. -func (*KeyBlockedRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{39} -} - -func (x *KeyBlockedRequest) GetKeyHash() []byte { - if x != nil { - return x.KeyHash - } - return nil -} - -type Incident struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - SerialTable string `protobuf:"bytes,2,opt,name=serialTable,proto3" json:"serialTable,omitempty"` - Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` - RenewBy int64 `protobuf:"varint,4,opt,name=renewBy,proto3" json:"renewBy,omitempty"` // Unix timestamp (nanoseconds) - Enabled bool `protobuf:"varint,5,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *Incident) Reset() { - *x = Incident{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Incident) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Incident) ProtoMessage() {} - -func (x *Incident) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Incident.ProtoReflect.Descriptor instead. 
-func (*Incident) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{40} -} - -func (x *Incident) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *Incident) GetSerialTable() string { - if x != nil { - return x.SerialTable - } - return "" -} - -func (x *Incident) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *Incident) GetRenewBy() int64 { - if x != nil { - return x.RenewBy - } - return 0 -} - -func (x *Incident) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -type Incidents struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Incidents []*Incident `protobuf:"bytes,1,rep,name=incidents,proto3" json:"incidents,omitempty"` -} - -func (x *Incidents) Reset() { - *x = Incidents{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Incidents) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Incidents) ProtoMessage() {} - -func (x *Incidents) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Incidents.ProtoReflect.Descriptor instead. -func (*Incidents) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{41} -} - -func (x *Incidents) GetIncidents() []*Incident { - if x != nil { - return x.Incidents - } - return nil -} - -type SerialsForIncidentRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - IncidentTable string `protobuf:"bytes,1,opt,name=incidentTable,proto3" json:"incidentTable,omitempty"` -} - -func (x *SerialsForIncidentRequest) Reset() { - *x = SerialsForIncidentRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SerialsForIncidentRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SerialsForIncidentRequest) ProtoMessage() {} - -func (x *SerialsForIncidentRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SerialsForIncidentRequest.ProtoReflect.Descriptor instead. 
-func (*SerialsForIncidentRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{42} -} - -func (x *SerialsForIncidentRequest) GetIncidentTable() string { - if x != nil { - return x.IncidentTable - } - return "" -} - -type IncidentSerial struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` - RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - OrderID int64 `protobuf:"varint,3,opt,name=orderID,proto3" json:"orderID,omitempty"` - LastNoticeSent int64 `protobuf:"varint,4,opt,name=lastNoticeSent,proto3" json:"lastNoticeSent,omitempty"` // Unix timestamp (nanoseconds) -} - -func (x *IncidentSerial) Reset() { - *x = IncidentSerial{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *IncidentSerial) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*IncidentSerial) ProtoMessage() {} - -func (x *IncidentSerial) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use IncidentSerial.ProtoReflect.Descriptor instead. -func (*IncidentSerial) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{43} -} - -func (x *IncidentSerial) GetSerial() string { - if x != nil { - return x.Serial - } - return "" -} - -func (x *IncidentSerial) GetRegistrationID() int64 { - if x != nil { - return x.RegistrationID - } - return 0 -} - -func (x *IncidentSerial) GetOrderID() int64 { - if x != nil { - return x.OrderID - } - return 0 -} - -func (x *IncidentSerial) GetLastNoticeSent() int64 { - if x != nil { - return x.LastNoticeSent - } - return 0 -} - -type GetRevokedCertsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"` - ExpiresAfter int64 `protobuf:"varint,2,opt,name=expiresAfter,proto3" json:"expiresAfter,omitempty"` // Unix timestamp (nanoseconds), inclusive - ExpiresBefore int64 `protobuf:"varint,3,opt,name=expiresBefore,proto3" json:"expiresBefore,omitempty"` // Unix timestamp (nanoseconds), exclusive - RevokedBefore int64 `protobuf:"varint,4,opt,name=revokedBefore,proto3" json:"revokedBefore,omitempty"` // Unix timestamp (nanoseconds) -} - -func (x *GetRevokedCertsRequest) Reset() { - *x = GetRevokedCertsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetRevokedCertsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetRevokedCertsRequest) ProtoMessage() {} - -func (x *GetRevokedCertsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetRevokedCertsRequest.ProtoReflect.Descriptor instead. 
-func (*GetRevokedCertsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{44} -} - -func (x *GetRevokedCertsRequest) GetIssuerNameID() int64 { - if x != nil { - return x.IssuerNameID - } - return 0 -} - -func (x *GetRevokedCertsRequest) GetExpiresAfter() int64 { - if x != nil { - return x.ExpiresAfter - } - return 0 -} - -func (x *GetRevokedCertsRequest) GetExpiresBefore() int64 { - if x != nil { - return x.ExpiresBefore - } - return 0 -} - -func (x *GetRevokedCertsRequest) GetRevokedBefore() int64 { - if x != nil { - return x.RevokedBefore - } - return 0 -} - -type RevocationStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Status int64 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` - RevokedReason int64 `protobuf:"varint,2,opt,name=revokedReason,proto3" json:"revokedReason,omitempty"` - RevokedDate *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=revokedDate,proto3" json:"revokedDate,omitempty"` // Unix timestamp (nanoseconds) -} - -func (x *RevocationStatus) Reset() { - *x = RevocationStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RevocationStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RevocationStatus) ProtoMessage() {} - -func (x *RevocationStatus) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RevocationStatus.ProtoReflect.Descriptor instead. -func (*RevocationStatus) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{45} -} - -func (x *RevocationStatus) GetStatus() int64 { - if x != nil { - return x.Status - } - return 0 -} - -func (x *RevocationStatus) GetRevokedReason() int64 { - if x != nil { - return x.RevokedReason - } - return 0 -} - -func (x *RevocationStatus) GetRevokedDate() *timestamppb.Timestamp { - if x != nil { - return x.RevokedDate - } - return nil -} - -type ValidAuthorizations_MapElement struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` - Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"` -} - -func (x *ValidAuthorizations_MapElement) Reset() { - *x = ValidAuthorizations_MapElement{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidAuthorizations_MapElement) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidAuthorizations_MapElement) ProtoMessage() {} - -func (x *ValidAuthorizations_MapElement) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidAuthorizations_MapElement.ProtoReflect.Descriptor instead. 
-func (*ValidAuthorizations_MapElement) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{5, 0} -} - -func (x *ValidAuthorizations_MapElement) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *ValidAuthorizations_MapElement) GetAuthz() *proto.Authorization { - if x != nil { - return x.Authz - } - return nil -} - -type Authorizations_MapElement struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` - Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"` -} - -func (x *Authorizations_MapElement) Reset() { - *x = Authorizations_MapElement{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Authorizations_MapElement) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Authorizations_MapElement) ProtoMessage() {} - -func (x *Authorizations_MapElement) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Authorizations_MapElement.ProtoReflect.Descriptor instead. -func (*Authorizations_MapElement) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{31, 0} -} - -func (x *Authorizations_MapElement) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *Authorizations_MapElement) GetAuthz() *proto.Authorization { - if x != nil { - return x.Authz - } - return nil -} - -var File_sa_proto protoreflect.FileDescriptor - -var file_sa_proto_rawDesc = []byte{ - 0x0a, 0x08, 0x73, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x73, 0x61, 0x1a, 0x15, - 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x20, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x1e, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, - 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6a, 0x77, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x03, 0x6a, 0x77, 0x6b, 0x22, 0x21, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0xba, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, - 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 
0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x55, 0x6e, - 0x74, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x22, 0x73, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, - 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, - 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6e, 0x6f, 0x77, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6c, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x1a, 0x4f, 0x0a, 0x0a, - 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x22, 0x20, 0x0a, - 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, - 0x84, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, - 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x22, 0x3b, 0x0a, 0x05, 
0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x1a, 0x0a, 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, - 0x61, 0x74, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6c, 0x61, 0x74, - 0x65, 0x73, 0x74, 0x22, 0x1d, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x22, 0x2c, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, - 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x03, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, - 0x22, 0x58, 0x0a, 0x1f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x0c, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x61, - 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x2e, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x73, 0x12, 0x36, 0x0a, 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x1d, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x02, 0x69, 0x70, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, - 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x21, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, - 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 
0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, - 0x65, 0x22, 0x53, 0x0a, 0x12, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, - 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x48, 0x0a, 0x14, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, - 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, - 0x0a, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, - 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, - 0x22, 0x30, 0x0a, 0x14, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x20, 0x50, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x14, - 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, - 0x65, 0x67, 0x49, 0x44, 0x22, 0x20, 0x0a, 0x06, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x16, - 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x10, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, - 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, - 0x67, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, - 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, - 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, - 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x12, 0x12, - 0x0a, 0x04, 0x6f, 0x63, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6f, 0x63, - 0x73, 0x70, 
0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, - 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, - 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x22, 0x30, 0x0a, 0x16, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x1e, 0x0a, 0x0c, 0x4f, 0x72, 0x64, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x95, 0x01, 0x0a, 0x0f, 0x4e, 0x65, 0x77, - 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, - 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x14, - 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, - 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0x7e, 0x0a, 0x18, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, - 0x75, 0x74, 0x68, 0x7a, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x08, - 0x6e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x31, 0x0a, - 0x09, 0x6e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, - 0x22, 0x52, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, - 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x22, 0x4c, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, - 0x63, 0x74, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x61, 0x63, 0x63, 0x74, - 0x49, 0x44, 0x22, 0x47, 0x0a, 0x17, 
0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, - 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, - 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x61, - 0x63, 0x63, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x54, 0x0a, 0x14, 0x46, - 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x2c, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, - 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, - 0x6c, 0x22, 0x6e, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, - 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, - 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6e, 0x6f, - 0x77, 0x22, 0x96, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x1a, 0x4f, 0x0a, 0x0a, 0x4d, 0x61, 0x70, - 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, - 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x22, 0x4c, 0x0a, 0x1f, 0x41, 0x64, - 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, - 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x22, 0x24, 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x73, 0x12, 0x10, 0x0a, 0x03, - 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x22, - 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, - 0x44, 0x32, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, - 0x69, 0x64, 0x22, 0x25, 0x0a, 0x11, 0x41, 0x75, 0x74, 0x68, 
0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x32, 0x49, 0x44, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x03, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0xb2, 0x01, 0x0a, 0x18, 0x52, 0x65, - 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, - 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, - 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x61, - 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x62, 0x61, - 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x22, 0xa6, - 0x02, 0x0a, 0x1c, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, - 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, - 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x12, - 0x44, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x3e, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, - 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, - 0x6d, 0x70, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x96, 0x01, 0x0a, 0x14, 0x41, 0x64, 0x64, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x18, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, - 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x61, 0x64, 0x64, 
0x65, 0x64, - 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, - 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79, - 0x22, 0x2d, 0x0a, 0x11, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x22, - 0x82, 0x01, 0x0a, 0x08, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x0b, - 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x10, - 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, - 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x42, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x07, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x42, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x22, 0x37, 0x0a, 0x09, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x2a, 0x0a, 0x09, 0x69, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x52, 0x09, 0x69, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x41, 0x0a, - 0x19, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x6e, - 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x22, 0x92, 0x01, 0x0a, 0x0e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, - 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x12, 0x26, 0x0a, - 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x63, 0x65, 0x53, 0x65, 0x6e, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x63, - 0x65, 0x53, 0x65, 0x6e, 0x74, 0x22, 0xac, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, - 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, - 0x18, 0x01, 
0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, - 0x6d, 0x65, 0x49, 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, - 0x66, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x65, 0x78, 0x70, 0x69, - 0x72, 0x65, 0x73, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x78, 0x70, 0x69, - 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0d, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x24, - 0x0a, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x65, - 0x66, 0x6f, 0x72, 0x65, 0x22, 0x8e, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, - 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x76, 0x6f, 0x6b, - 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, - 0x64, 0x44, 0x61, 0x74, 0x65, 0x32, 0xb2, 0x0f, 0x0a, 0x18, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, - 0x6c, 0x79, 0x12, 0x53, 0x0a, 0x18, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x23, - 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x0d, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, - 0x51, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x25, - 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x22, 0x00, 0x12, 0x32, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, - 0x73, 0x12, 0x16, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, - 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 
0x12, 0x3e, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50, - 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x16, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, - 0x12, 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, - 0x12, 0x4d, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, - 0x37, 0x0a, 0x0d, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, - 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, - 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, 0x61, 0x2e, - 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x1a, 0x46, 0x51, 0x44, 0x4e, - 0x53, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x46, 0x6f, 0x72, - 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, - 0x22, 0x00, 0x12, 0x40, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x1a, 0x13, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x31, - 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, - 0x00, 0x12, 0x3d, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 
0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, - 0x12, 0x48, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x78, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x00, 0x12, 0x2b, 0x0a, 0x08, 0x47, 0x65, - 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x4f, 0x72, 0x64, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x72, - 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x1b, 0x2e, 0x73, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x65, - 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x32, 0x12, 0x22, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x34, - 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, - 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x12, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x00, 0x12, 0x3c, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x4a, - 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, 0x4b, 0x65, 0x79, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, - 0x39, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x1a, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x41, 0x0a, 0x0f, 0x47, 0x65, - 0x74, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x1a, 0x2e, - 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, - 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x63, 0x6f, 
0x72, 0x65, - 0x2e, 0x43, 0x52, 0x4c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x35, 0x0a, - 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x12, - 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, - 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x26, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x12, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x73, 0x46, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x0a, 0x2e, 0x73, - 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x0d, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, - 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a, 0x4b, 0x65, 0x79, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x15, 0x2e, 0x73, 0x61, 0x2e, 0x4b, 0x65, 0x79, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, - 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x19, - 0x50, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x24, 0x2e, 0x73, 0x61, 0x2e, 0x50, - 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4b, 0x0a, - 0x12, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, - 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x00, 0x30, 0x01, 0x32, 0xdf, 0x18, 0x0a, 0x10, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, - 0x53, 0x0a, 0x18, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x23, 0x2e, 0x73, 0x61, - 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x73, 
0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x0d, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, - 0x4e, 0x53, 0x65, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x1b, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x25, 0x2e, 0x73, 0x61, - 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, - 0x32, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x16, - 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x32, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x16, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x12, 0x21, 0x2e, - 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x4d, 0x0a, - 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x21, 0x2e, 0x73, - 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x0d, - 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x18, 0x2e, - 0x73, 0x61, 0x2e, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, - 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x1a, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x46, 0x6f, 0x72, 0x57, 0x69, 0x6e, - 0x64, 0x6f, 0x77, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, - 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, - 0x73, 0x61, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x22, 0x00, 0x12, - 0x40, 0x0a, 0x11, 0x47, 0x65, 0x74, 
0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x00, 0x12, 0x48, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0e, 0x47, - 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0a, 0x2e, - 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x3d, - 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x48, 0x0a, - 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x78, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x00, 0x12, 0x2b, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4f, 0x72, - 0x64, 0x65, 0x72, 0x12, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, - 0x65, 0x72, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, - 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x1b, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, - 0x65, 0x72, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, - 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, - 0x12, 0x22, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x34, 0x0a, 0x11, 0x47, - 0x65, 0x74, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, - 0x00, 0x12, 0x3b, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 
0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x3c, - 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, - 0x57, 0x65, 0x62, 0x4b, 0x65, 0x79, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x13, - 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, - 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x41, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, - 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x1a, 0x2e, 0x73, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x52, - 0x4c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x35, 0x0a, 0x11, 0x47, 0x65, - 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x12, 0x2e, 0x73, 0x61, - 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, - 0x00, 0x12, 0x52, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x21, 0x2e, 0x73, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x26, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, - 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x12, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, - 0x46, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x0d, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x65, 0x64, 0x12, 0x15, 0x2e, 0x73, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, 0x61, - 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x19, 0x50, 
0x72, 0x65, - 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x24, 0x2e, 0x73, 0x61, 0x2e, 0x50, 0x72, 0x65, 0x76, - 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x45, - 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, - 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x12, 0x53, 0x65, - 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x12, 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, - 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, - 0x69, 0x61, 0x6c, 0x22, 0x00, 0x30, 0x01, 0x12, 0x43, 0x0a, 0x0d, 0x41, 0x64, 0x64, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, - 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0e, - 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x19, - 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x73, 0x61, 0x2e, 0x41, - 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x50, 0x72, - 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x73, - 0x61, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, - 0x00, 0x12, 0x3b, 0x0a, 0x09, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x14, - 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x4a, - 0x0a, 0x18, 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x16, 0x44, 0x65, - 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x22, 0x00, 
0x12, 0x54, 0x0a, 0x16, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x20, 0x2e, 0x73, - 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0d, 0x46, 0x69, 0x6e, 0x61, - 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x46, - 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x52, 0x0a, - 0x12, 0x4e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x32, 0x12, 0x23, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x65, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x49, 0x44, 0x73, 0x22, - 0x00, 0x12, 0x2e, 0x0a, 0x08, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x13, 0x2e, - 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, - 0x00, 0x12, 0x40, 0x0a, 0x11, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, - 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, - 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, - 0x72, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, - 0x12, 0x4b, 0x0a, 0x11, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, - 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a, - 0x0d, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, - 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x22, 0x00, 0x12, 0x40, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x50, 0x72, - 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 
0x6e, 0x67, 0x12, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x4f, 0x72, - 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, 0x29, 0x5a, 0x27, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x73, - 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_sa_proto_rawDescOnce sync.Once - file_sa_proto_rawDescData = file_sa_proto_rawDesc -) - -func file_sa_proto_rawDescGZIP() []byte { - file_sa_proto_rawDescOnce.Do(func() { - file_sa_proto_rawDescData = protoimpl.X.CompressGZIP(file_sa_proto_rawDescData) - }) - return file_sa_proto_rawDescData -} - -var file_sa_proto_msgTypes = make([]protoimpl.MessageInfo, 49) -var file_sa_proto_goTypes = []interface{}{ - (*RegistrationID)(nil), // 0: sa.RegistrationID - (*JSONWebKey)(nil), // 1: sa.JSONWebKey - (*AuthorizationID)(nil), // 2: sa.AuthorizationID - (*GetPendingAuthorizationRequest)(nil), // 3: sa.GetPendingAuthorizationRequest - (*GetValidAuthorizationsRequest)(nil), // 4: sa.GetValidAuthorizationsRequest - (*ValidAuthorizations)(nil), // 5: sa.ValidAuthorizations - (*Serial)(nil), // 6: sa.Serial - (*SerialMetadata)(nil), // 7: sa.SerialMetadata - (*Range)(nil), // 8: sa.Range - (*Count)(nil), // 9: sa.Count - (*Timestamps)(nil), // 10: sa.Timestamps - (*CountCertificatesByNamesRequest)(nil), // 11: sa.CountCertificatesByNamesRequest - (*CountByNames)(nil), // 12: sa.CountByNames - (*CountRegistrationsByIPRequest)(nil), // 13: sa.CountRegistrationsByIPRequest - (*CountInvalidAuthorizationsRequest)(nil), // 14: sa.CountInvalidAuthorizationsRequest - (*CountOrdersRequest)(nil), // 15: sa.CountOrdersRequest - (*CountFQDNSetsRequest)(nil), // 16: sa.CountFQDNSetsRequest - (*FQDNSetExistsRequest)(nil), // 17: sa.FQDNSetExistsRequest - (*PreviousCertificateExistsRequest)(nil), // 18: sa.PreviousCertificateExistsRequest - (*Exists)(nil), // 19: sa.Exists - (*AddSerialRequest)(nil), // 20: sa.AddSerialRequest - (*AddCertificateRequest)(nil), // 21: sa.AddCertificateRequest - (*AddCertificateResponse)(nil), // 22: sa.AddCertificateResponse - (*OrderRequest)(nil), // 23: sa.OrderRequest - (*NewOrderRequest)(nil), // 24: sa.NewOrderRequest - (*NewOrderAndAuthzsRequest)(nil), // 25: sa.NewOrderAndAuthzsRequest - (*SetOrderErrorRequest)(nil), // 26: sa.SetOrderErrorRequest - 
(*GetValidOrderAuthorizationsRequest)(nil), // 27: sa.GetValidOrderAuthorizationsRequest - (*GetOrderForNamesRequest)(nil), // 28: sa.GetOrderForNamesRequest - (*FinalizeOrderRequest)(nil), // 29: sa.FinalizeOrderRequest - (*GetAuthorizationsRequest)(nil), // 30: sa.GetAuthorizationsRequest - (*Authorizations)(nil), // 31: sa.Authorizations - (*AddPendingAuthorizationsRequest)(nil), // 32: sa.AddPendingAuthorizationsRequest - (*AuthorizationIDs)(nil), // 33: sa.AuthorizationIDs - (*AuthorizationID2)(nil), // 34: sa.AuthorizationID2 - (*Authorization2IDs)(nil), // 35: sa.Authorization2IDs - (*RevokeCertificateRequest)(nil), // 36: sa.RevokeCertificateRequest - (*FinalizeAuthorizationRequest)(nil), // 37: sa.FinalizeAuthorizationRequest - (*AddBlockedKeyRequest)(nil), // 38: sa.AddBlockedKeyRequest - (*KeyBlockedRequest)(nil), // 39: sa.KeyBlockedRequest - (*Incident)(nil), // 40: sa.Incident - (*Incidents)(nil), // 41: sa.Incidents - (*SerialsForIncidentRequest)(nil), // 42: sa.SerialsForIncidentRequest - (*IncidentSerial)(nil), // 43: sa.IncidentSerial - (*GetRevokedCertsRequest)(nil), // 44: sa.GetRevokedCertsRequest - (*RevocationStatus)(nil), // 45: sa.RevocationStatus - (*ValidAuthorizations_MapElement)(nil), // 46: sa.ValidAuthorizations.MapElement - nil, // 47: sa.CountByNames.CountsEntry - (*Authorizations_MapElement)(nil), // 48: sa.Authorizations.MapElement - (*timestamppb.Timestamp)(nil), // 49: google.protobuf.Timestamp - (*proto.Authorization)(nil), // 50: core.Authorization - (*proto.ProblemDetails)(nil), // 51: core.ProblemDetails - (*proto.ValidationRecord)(nil), // 52: core.ValidationRecord - (*emptypb.Empty)(nil), // 53: google.protobuf.Empty - (*proto.Registration)(nil), // 54: core.Registration - (*proto.Certificate)(nil), // 55: core.Certificate - (*proto.CertificateStatus)(nil), // 56: core.CertificateStatus - (*proto.Order)(nil), // 57: core.Order - (*proto.CRLEntry)(nil), // 58: core.CRLEntry -} -var file_sa_proto_depIdxs = []int32{ - 46, // 0: sa.ValidAuthorizations.valid:type_name -> sa.ValidAuthorizations.MapElement - 8, // 1: sa.CountCertificatesByNamesRequest.range:type_name -> sa.Range - 47, // 2: sa.CountByNames.counts:type_name -> sa.CountByNames.CountsEntry - 49, // 3: sa.CountByNames.earliest:type_name -> google.protobuf.Timestamp - 8, // 4: sa.CountRegistrationsByIPRequest.range:type_name -> sa.Range - 8, // 5: sa.CountInvalidAuthorizationsRequest.range:type_name -> sa.Range - 8, // 6: sa.CountOrdersRequest.range:type_name -> sa.Range - 24, // 7: sa.NewOrderAndAuthzsRequest.newOrder:type_name -> sa.NewOrderRequest - 50, // 8: sa.NewOrderAndAuthzsRequest.newAuthzs:type_name -> core.Authorization - 51, // 9: sa.SetOrderErrorRequest.error:type_name -> core.ProblemDetails - 48, // 10: sa.Authorizations.authz:type_name -> sa.Authorizations.MapElement - 50, // 11: sa.AddPendingAuthorizationsRequest.authz:type_name -> core.Authorization - 52, // 12: sa.FinalizeAuthorizationRequest.validationRecords:type_name -> core.ValidationRecord - 51, // 13: sa.FinalizeAuthorizationRequest.validationError:type_name -> core.ProblemDetails - 40, // 14: sa.Incidents.incidents:type_name -> sa.Incident - 49, // 15: sa.RevocationStatus.revokedDate:type_name -> google.protobuf.Timestamp - 50, // 16: sa.ValidAuthorizations.MapElement.authz:type_name -> core.Authorization - 50, // 17: sa.Authorizations.MapElement.authz:type_name -> core.Authorization - 11, // 18: sa.StorageAuthorityReadOnly.CountCertificatesByNames:input_type -> sa.CountCertificatesByNamesRequest - 16, // 19: 
sa.StorageAuthorityReadOnly.CountFQDNSets:input_type -> sa.CountFQDNSetsRequest - 14, // 20: sa.StorageAuthorityReadOnly.CountInvalidAuthorizations2:input_type -> sa.CountInvalidAuthorizationsRequest - 15, // 21: sa.StorageAuthorityReadOnly.CountOrders:input_type -> sa.CountOrdersRequest - 0, // 22: sa.StorageAuthorityReadOnly.CountPendingAuthorizations2:input_type -> sa.RegistrationID - 13, // 23: sa.StorageAuthorityReadOnly.CountRegistrationsByIP:input_type -> sa.CountRegistrationsByIPRequest - 13, // 24: sa.StorageAuthorityReadOnly.CountRegistrationsByIPRange:input_type -> sa.CountRegistrationsByIPRequest - 17, // 25: sa.StorageAuthorityReadOnly.FQDNSetExists:input_type -> sa.FQDNSetExistsRequest - 16, // 26: sa.StorageAuthorityReadOnly.FQDNSetTimestampsForWindow:input_type -> sa.CountFQDNSetsRequest - 34, // 27: sa.StorageAuthorityReadOnly.GetAuthorization2:input_type -> sa.AuthorizationID2 - 30, // 28: sa.StorageAuthorityReadOnly.GetAuthorizations2:input_type -> sa.GetAuthorizationsRequest - 6, // 29: sa.StorageAuthorityReadOnly.GetCertificate:input_type -> sa.Serial - 6, // 30: sa.StorageAuthorityReadOnly.GetCertificateStatus:input_type -> sa.Serial - 53, // 31: sa.StorageAuthorityReadOnly.GetMaxExpiration:input_type -> google.protobuf.Empty - 23, // 32: sa.StorageAuthorityReadOnly.GetOrder:input_type -> sa.OrderRequest - 28, // 33: sa.StorageAuthorityReadOnly.GetOrderForNames:input_type -> sa.GetOrderForNamesRequest - 3, // 34: sa.StorageAuthorityReadOnly.GetPendingAuthorization2:input_type -> sa.GetPendingAuthorizationRequest - 6, // 35: sa.StorageAuthorityReadOnly.GetPrecertificate:input_type -> sa.Serial - 0, // 36: sa.StorageAuthorityReadOnly.GetRegistration:input_type -> sa.RegistrationID - 1, // 37: sa.StorageAuthorityReadOnly.GetRegistrationByKey:input_type -> sa.JSONWebKey - 6, // 38: sa.StorageAuthorityReadOnly.GetRevocationStatus:input_type -> sa.Serial - 44, // 39: sa.StorageAuthorityReadOnly.GetRevokedCerts:input_type -> sa.GetRevokedCertsRequest - 6, // 40: sa.StorageAuthorityReadOnly.GetSerialMetadata:input_type -> sa.Serial - 4, // 41: sa.StorageAuthorityReadOnly.GetValidAuthorizations2:input_type -> sa.GetValidAuthorizationsRequest - 27, // 42: sa.StorageAuthorityReadOnly.GetValidOrderAuthorizations2:input_type -> sa.GetValidOrderAuthorizationsRequest - 6, // 43: sa.StorageAuthorityReadOnly.IncidentsForSerial:input_type -> sa.Serial - 39, // 44: sa.StorageAuthorityReadOnly.KeyBlocked:input_type -> sa.KeyBlockedRequest - 18, // 45: sa.StorageAuthorityReadOnly.PreviousCertificateExists:input_type -> sa.PreviousCertificateExistsRequest - 42, // 46: sa.StorageAuthorityReadOnly.SerialsForIncident:input_type -> sa.SerialsForIncidentRequest - 11, // 47: sa.StorageAuthority.CountCertificatesByNames:input_type -> sa.CountCertificatesByNamesRequest - 16, // 48: sa.StorageAuthority.CountFQDNSets:input_type -> sa.CountFQDNSetsRequest - 14, // 49: sa.StorageAuthority.CountInvalidAuthorizations2:input_type -> sa.CountInvalidAuthorizationsRequest - 15, // 50: sa.StorageAuthority.CountOrders:input_type -> sa.CountOrdersRequest - 0, // 51: sa.StorageAuthority.CountPendingAuthorizations2:input_type -> sa.RegistrationID - 13, // 52: sa.StorageAuthority.CountRegistrationsByIP:input_type -> sa.CountRegistrationsByIPRequest - 13, // 53: sa.StorageAuthority.CountRegistrationsByIPRange:input_type -> sa.CountRegistrationsByIPRequest - 17, // 54: sa.StorageAuthority.FQDNSetExists:input_type -> sa.FQDNSetExistsRequest - 16, // 55: sa.StorageAuthority.FQDNSetTimestampsForWindow:input_type -> 
sa.CountFQDNSetsRequest - 34, // 56: sa.StorageAuthority.GetAuthorization2:input_type -> sa.AuthorizationID2 - 30, // 57: sa.StorageAuthority.GetAuthorizations2:input_type -> sa.GetAuthorizationsRequest - 6, // 58: sa.StorageAuthority.GetCertificate:input_type -> sa.Serial - 6, // 59: sa.StorageAuthority.GetCertificateStatus:input_type -> sa.Serial - 53, // 60: sa.StorageAuthority.GetMaxExpiration:input_type -> google.protobuf.Empty - 23, // 61: sa.StorageAuthority.GetOrder:input_type -> sa.OrderRequest - 28, // 62: sa.StorageAuthority.GetOrderForNames:input_type -> sa.GetOrderForNamesRequest - 3, // 63: sa.StorageAuthority.GetPendingAuthorization2:input_type -> sa.GetPendingAuthorizationRequest - 6, // 64: sa.StorageAuthority.GetPrecertificate:input_type -> sa.Serial - 0, // 65: sa.StorageAuthority.GetRegistration:input_type -> sa.RegistrationID - 1, // 66: sa.StorageAuthority.GetRegistrationByKey:input_type -> sa.JSONWebKey - 6, // 67: sa.StorageAuthority.GetRevocationStatus:input_type -> sa.Serial - 44, // 68: sa.StorageAuthority.GetRevokedCerts:input_type -> sa.GetRevokedCertsRequest - 6, // 69: sa.StorageAuthority.GetSerialMetadata:input_type -> sa.Serial - 4, // 70: sa.StorageAuthority.GetValidAuthorizations2:input_type -> sa.GetValidAuthorizationsRequest - 27, // 71: sa.StorageAuthority.GetValidOrderAuthorizations2:input_type -> sa.GetValidOrderAuthorizationsRequest - 6, // 72: sa.StorageAuthority.IncidentsForSerial:input_type -> sa.Serial - 39, // 73: sa.StorageAuthority.KeyBlocked:input_type -> sa.KeyBlockedRequest - 18, // 74: sa.StorageAuthority.PreviousCertificateExists:input_type -> sa.PreviousCertificateExistsRequest - 42, // 75: sa.StorageAuthority.SerialsForIncident:input_type -> sa.SerialsForIncidentRequest - 38, // 76: sa.StorageAuthority.AddBlockedKey:input_type -> sa.AddBlockedKeyRequest - 21, // 77: sa.StorageAuthority.AddCertificate:input_type -> sa.AddCertificateRequest - 21, // 78: sa.StorageAuthority.AddPrecertificate:input_type -> sa.AddCertificateRequest - 20, // 79: sa.StorageAuthority.AddSerial:input_type -> sa.AddSerialRequest - 34, // 80: sa.StorageAuthority.DeactivateAuthorization2:input_type -> sa.AuthorizationID2 - 0, // 81: sa.StorageAuthority.DeactivateRegistration:input_type -> sa.RegistrationID - 37, // 82: sa.StorageAuthority.FinalizeAuthorization2:input_type -> sa.FinalizeAuthorizationRequest - 29, // 83: sa.StorageAuthority.FinalizeOrder:input_type -> sa.FinalizeOrderRequest - 32, // 84: sa.StorageAuthority.NewAuthorizations2:input_type -> sa.AddPendingAuthorizationsRequest - 24, // 85: sa.StorageAuthority.NewOrder:input_type -> sa.NewOrderRequest - 25, // 86: sa.StorageAuthority.NewOrderAndAuthzs:input_type -> sa.NewOrderAndAuthzsRequest - 54, // 87: sa.StorageAuthority.NewRegistration:input_type -> core.Registration - 36, // 88: sa.StorageAuthority.RevokeCertificate:input_type -> sa.RevokeCertificateRequest - 26, // 89: sa.StorageAuthority.SetOrderError:input_type -> sa.SetOrderErrorRequest - 23, // 90: sa.StorageAuthority.SetOrderProcessing:input_type -> sa.OrderRequest - 54, // 91: sa.StorageAuthority.UpdateRegistration:input_type -> core.Registration - 36, // 92: sa.StorageAuthority.UpdateRevokedCertificate:input_type -> sa.RevokeCertificateRequest - 12, // 93: sa.StorageAuthorityReadOnly.CountCertificatesByNames:output_type -> sa.CountByNames - 9, // 94: sa.StorageAuthorityReadOnly.CountFQDNSets:output_type -> sa.Count - 9, // 95: sa.StorageAuthorityReadOnly.CountInvalidAuthorizations2:output_type -> sa.Count - 9, // 96: 
sa.StorageAuthorityReadOnly.CountOrders:output_type -> sa.Count - 9, // 97: sa.StorageAuthorityReadOnly.CountPendingAuthorizations2:output_type -> sa.Count - 9, // 98: sa.StorageAuthorityReadOnly.CountRegistrationsByIP:output_type -> sa.Count - 9, // 99: sa.StorageAuthorityReadOnly.CountRegistrationsByIPRange:output_type -> sa.Count - 19, // 100: sa.StorageAuthorityReadOnly.FQDNSetExists:output_type -> sa.Exists - 10, // 101: sa.StorageAuthorityReadOnly.FQDNSetTimestampsForWindow:output_type -> sa.Timestamps - 50, // 102: sa.StorageAuthorityReadOnly.GetAuthorization2:output_type -> core.Authorization - 31, // 103: sa.StorageAuthorityReadOnly.GetAuthorizations2:output_type -> sa.Authorizations - 55, // 104: sa.StorageAuthorityReadOnly.GetCertificate:output_type -> core.Certificate - 56, // 105: sa.StorageAuthorityReadOnly.GetCertificateStatus:output_type -> core.CertificateStatus - 49, // 106: sa.StorageAuthorityReadOnly.GetMaxExpiration:output_type -> google.protobuf.Timestamp - 57, // 107: sa.StorageAuthorityReadOnly.GetOrder:output_type -> core.Order - 57, // 108: sa.StorageAuthorityReadOnly.GetOrderForNames:output_type -> core.Order - 50, // 109: sa.StorageAuthorityReadOnly.GetPendingAuthorization2:output_type -> core.Authorization - 55, // 110: sa.StorageAuthorityReadOnly.GetPrecertificate:output_type -> core.Certificate - 54, // 111: sa.StorageAuthorityReadOnly.GetRegistration:output_type -> core.Registration - 54, // 112: sa.StorageAuthorityReadOnly.GetRegistrationByKey:output_type -> core.Registration - 45, // 113: sa.StorageAuthorityReadOnly.GetRevocationStatus:output_type -> sa.RevocationStatus - 58, // 114: sa.StorageAuthorityReadOnly.GetRevokedCerts:output_type -> core.CRLEntry - 7, // 115: sa.StorageAuthorityReadOnly.GetSerialMetadata:output_type -> sa.SerialMetadata - 31, // 116: sa.StorageAuthorityReadOnly.GetValidAuthorizations2:output_type -> sa.Authorizations - 31, // 117: sa.StorageAuthorityReadOnly.GetValidOrderAuthorizations2:output_type -> sa.Authorizations - 41, // 118: sa.StorageAuthorityReadOnly.IncidentsForSerial:output_type -> sa.Incidents - 19, // 119: sa.StorageAuthorityReadOnly.KeyBlocked:output_type -> sa.Exists - 19, // 120: sa.StorageAuthorityReadOnly.PreviousCertificateExists:output_type -> sa.Exists - 43, // 121: sa.StorageAuthorityReadOnly.SerialsForIncident:output_type -> sa.IncidentSerial - 12, // 122: sa.StorageAuthority.CountCertificatesByNames:output_type -> sa.CountByNames - 9, // 123: sa.StorageAuthority.CountFQDNSets:output_type -> sa.Count - 9, // 124: sa.StorageAuthority.CountInvalidAuthorizations2:output_type -> sa.Count - 9, // 125: sa.StorageAuthority.CountOrders:output_type -> sa.Count - 9, // 126: sa.StorageAuthority.CountPendingAuthorizations2:output_type -> sa.Count - 9, // 127: sa.StorageAuthority.CountRegistrationsByIP:output_type -> sa.Count - 9, // 128: sa.StorageAuthority.CountRegistrationsByIPRange:output_type -> sa.Count - 19, // 129: sa.StorageAuthority.FQDNSetExists:output_type -> sa.Exists - 10, // 130: sa.StorageAuthority.FQDNSetTimestampsForWindow:output_type -> sa.Timestamps - 50, // 131: sa.StorageAuthority.GetAuthorization2:output_type -> core.Authorization - 31, // 132: sa.StorageAuthority.GetAuthorizations2:output_type -> sa.Authorizations - 55, // 133: sa.StorageAuthority.GetCertificate:output_type -> core.Certificate - 56, // 134: sa.StorageAuthority.GetCertificateStatus:output_type -> core.CertificateStatus - 49, // 135: sa.StorageAuthority.GetMaxExpiration:output_type -> google.protobuf.Timestamp - 57, // 136: 
sa.StorageAuthority.GetOrder:output_type -> core.Order - 57, // 137: sa.StorageAuthority.GetOrderForNames:output_type -> core.Order - 50, // 138: sa.StorageAuthority.GetPendingAuthorization2:output_type -> core.Authorization - 55, // 139: sa.StorageAuthority.GetPrecertificate:output_type -> core.Certificate - 54, // 140: sa.StorageAuthority.GetRegistration:output_type -> core.Registration - 54, // 141: sa.StorageAuthority.GetRegistrationByKey:output_type -> core.Registration - 45, // 142: sa.StorageAuthority.GetRevocationStatus:output_type -> sa.RevocationStatus - 58, // 143: sa.StorageAuthority.GetRevokedCerts:output_type -> core.CRLEntry - 7, // 144: sa.StorageAuthority.GetSerialMetadata:output_type -> sa.SerialMetadata - 31, // 145: sa.StorageAuthority.GetValidAuthorizations2:output_type -> sa.Authorizations - 31, // 146: sa.StorageAuthority.GetValidOrderAuthorizations2:output_type -> sa.Authorizations - 41, // 147: sa.StorageAuthority.IncidentsForSerial:output_type -> sa.Incidents - 19, // 148: sa.StorageAuthority.KeyBlocked:output_type -> sa.Exists - 19, // 149: sa.StorageAuthority.PreviousCertificateExists:output_type -> sa.Exists - 43, // 150: sa.StorageAuthority.SerialsForIncident:output_type -> sa.IncidentSerial - 53, // 151: sa.StorageAuthority.AddBlockedKey:output_type -> google.protobuf.Empty - 22, // 152: sa.StorageAuthority.AddCertificate:output_type -> sa.AddCertificateResponse - 53, // 153: sa.StorageAuthority.AddPrecertificate:output_type -> google.protobuf.Empty - 53, // 154: sa.StorageAuthority.AddSerial:output_type -> google.protobuf.Empty - 53, // 155: sa.StorageAuthority.DeactivateAuthorization2:output_type -> google.protobuf.Empty - 53, // 156: sa.StorageAuthority.DeactivateRegistration:output_type -> google.protobuf.Empty - 53, // 157: sa.StorageAuthority.FinalizeAuthorization2:output_type -> google.protobuf.Empty - 53, // 158: sa.StorageAuthority.FinalizeOrder:output_type -> google.protobuf.Empty - 35, // 159: sa.StorageAuthority.NewAuthorizations2:output_type -> sa.Authorization2IDs - 57, // 160: sa.StorageAuthority.NewOrder:output_type -> core.Order - 57, // 161: sa.StorageAuthority.NewOrderAndAuthzs:output_type -> core.Order - 54, // 162: sa.StorageAuthority.NewRegistration:output_type -> core.Registration - 53, // 163: sa.StorageAuthority.RevokeCertificate:output_type -> google.protobuf.Empty - 53, // 164: sa.StorageAuthority.SetOrderError:output_type -> google.protobuf.Empty - 53, // 165: sa.StorageAuthority.SetOrderProcessing:output_type -> google.protobuf.Empty - 53, // 166: sa.StorageAuthority.UpdateRegistration:output_type -> google.protobuf.Empty - 53, // 167: sa.StorageAuthority.UpdateRevokedCertificate:output_type -> google.protobuf.Empty - 93, // [93:168] is the sub-list for method output_type - 18, // [18:93] is the sub-list for method input_type - 18, // [18:18] is the sub-list for extension type_name - 18, // [18:18] is the sub-list for extension extendee - 0, // [0:18] is the sub-list for field type_name -} - -func init() { file_sa_proto_init() } -func file_sa_proto_init() { - if File_sa_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_sa_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RegistrationID); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*JSONWebKey); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthorizationID); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetPendingAuthorizationRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetValidAuthorizationsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidAuthorizations); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Serial); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SerialMetadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Range); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Count); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Timestamps); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountCertificatesByNamesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountByNames); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountRegistrationsByIPRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountInvalidAuthorizationsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountOrdersRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - 
return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountFQDNSetsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FQDNSetExistsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PreviousCertificateExistsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Exists); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddSerialRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCertificateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCertificateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OrderRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NewOrderRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NewOrderAndAuthzsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetOrderErrorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetValidOrderAuthorizationsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOrderForNamesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FinalizeOrderRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - 
case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAuthorizationsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Authorizations); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddPendingAuthorizationsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthorizationIDs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthorizationID2); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Authorization2IDs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RevokeCertificateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FinalizeAuthorizationRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddBlockedKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*KeyBlockedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Incident); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Incidents); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SerialsForIncidentRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IncidentSerial); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - 
return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRevokedCertsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RevocationStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidAuthorizations_MapElement); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Authorizations_MapElement); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_sa_proto_rawDesc, - NumEnums: 0, - NumMessages: 49, - NumExtensions: 0, - NumServices: 2, - }, - GoTypes: file_sa_proto_goTypes, - DependencyIndexes: file_sa_proto_depIdxs, - MessageInfos: file_sa_proto_msgTypes, - }.Build() - File_sa_proto = out.File - file_sa_proto_rawDesc = nil - file_sa_proto_goTypes = nil - file_sa_proto_depIdxs = nil -} diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto deleted file mode 100644 index 1e4ad6fb878..00000000000 --- a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto +++ /dev/null @@ -1,353 +0,0 @@ -syntax = "proto3"; - -package sa; -option go_package = "github.com/letsencrypt/boulder/sa/proto"; - -import "core/proto/core.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -// StorageAuthorityReadOnly exposes only those SA methods which are read-only. 
-service StorageAuthorityReadOnly { - rpc CountCertificatesByNames(CountCertificatesByNamesRequest) returns (CountByNames) {} - rpc CountFQDNSets(CountFQDNSetsRequest) returns (Count) {} - rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {} - rpc CountOrders(CountOrdersRequest) returns (Count) {} - rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {} - rpc CountRegistrationsByIP(CountRegistrationsByIPRequest) returns (Count) {} - rpc CountRegistrationsByIPRange(CountRegistrationsByIPRequest) returns (Count) {} - rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {} - rpc FQDNSetTimestampsForWindow(CountFQDNSetsRequest) returns (Timestamps) {} - rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {} - rpc GetAuthorizations2(GetAuthorizationsRequest) returns (Authorizations) {} - rpc GetCertificate(Serial) returns (core.Certificate) {} - rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {} - rpc GetMaxExpiration(google.protobuf.Empty) returns (google.protobuf.Timestamp) {} - rpc GetOrder(OrderRequest) returns (core.Order) {} - rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {} - rpc GetPendingAuthorization2(GetPendingAuthorizationRequest) returns (core.Authorization) {} - rpc GetPrecertificate(Serial) returns (core.Certificate) {} - rpc GetRegistration(RegistrationID) returns (core.Registration) {} - rpc GetRegistrationByKey(JSONWebKey) returns (core.Registration) {} - rpc GetRevocationStatus(Serial) returns (RevocationStatus) {} - rpc GetRevokedCerts(GetRevokedCertsRequest) returns (stream core.CRLEntry) {} - rpc GetSerialMetadata(Serial) returns (SerialMetadata) {} - rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {} - rpc GetValidOrderAuthorizations2(GetValidOrderAuthorizationsRequest) returns (Authorizations) {} - rpc IncidentsForSerial(Serial) returns (Incidents) {} - rpc KeyBlocked(KeyBlockedRequest) returns (Exists) {} - rpc PreviousCertificateExists(PreviousCertificateExistsRequest) returns (Exists) {} - rpc SerialsForIncident (SerialsForIncidentRequest) returns (stream IncidentSerial) {} -} - -// StorageAuthority provides full read/write access to the database. -service StorageAuthority { - // Getters: this list must be identical to the StorageAuthorityReadOnly rpcs. 
- rpc CountCertificatesByNames(CountCertificatesByNamesRequest) returns (CountByNames) {} - rpc CountFQDNSets(CountFQDNSetsRequest) returns (Count) {} - rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {} - rpc CountOrders(CountOrdersRequest) returns (Count) {} - rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {} - rpc CountRegistrationsByIP(CountRegistrationsByIPRequest) returns (Count) {} - rpc CountRegistrationsByIPRange(CountRegistrationsByIPRequest) returns (Count) {} - rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {} - rpc FQDNSetTimestampsForWindow(CountFQDNSetsRequest) returns (Timestamps) {} - rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {} - rpc GetAuthorizations2(GetAuthorizationsRequest) returns (Authorizations) {} - rpc GetCertificate(Serial) returns (core.Certificate) {} - rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {} - rpc GetMaxExpiration(google.protobuf.Empty) returns (google.protobuf.Timestamp) {} - rpc GetOrder(OrderRequest) returns (core.Order) {} - rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {} - rpc GetPendingAuthorization2(GetPendingAuthorizationRequest) returns (core.Authorization) {} - rpc GetPrecertificate(Serial) returns (core.Certificate) {} - rpc GetRegistration(RegistrationID) returns (core.Registration) {} - rpc GetRegistrationByKey(JSONWebKey) returns (core.Registration) {} - rpc GetRevocationStatus(Serial) returns (RevocationStatus) {} - rpc GetRevokedCerts(GetRevokedCertsRequest) returns (stream core.CRLEntry) {} - rpc GetSerialMetadata(Serial) returns (SerialMetadata) {} - rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {} - rpc GetValidOrderAuthorizations2(GetValidOrderAuthorizationsRequest) returns (Authorizations) {} - rpc IncidentsForSerial(Serial) returns (Incidents) {} - rpc KeyBlocked(KeyBlockedRequest) returns (Exists) {} - rpc PreviousCertificateExists(PreviousCertificateExistsRequest) returns (Exists) {} - rpc SerialsForIncident (SerialsForIncidentRequest) returns (stream IncidentSerial) {} - // Adders - rpc AddBlockedKey(AddBlockedKeyRequest) returns (google.protobuf.Empty) {} - rpc AddCertificate(AddCertificateRequest) returns (AddCertificateResponse) {} - rpc AddPrecertificate(AddCertificateRequest) returns (google.protobuf.Empty) {} - rpc AddSerial(AddSerialRequest) returns (google.protobuf.Empty) {} - rpc DeactivateAuthorization2(AuthorizationID2) returns (google.protobuf.Empty) {} - rpc DeactivateRegistration(RegistrationID) returns (google.protobuf.Empty) {} - rpc FinalizeAuthorization2(FinalizeAuthorizationRequest) returns (google.protobuf.Empty) {} - rpc FinalizeOrder(FinalizeOrderRequest) returns (google.protobuf.Empty) {} - rpc NewAuthorizations2(AddPendingAuthorizationsRequest) returns (Authorization2IDs) {} - rpc NewOrder(NewOrderRequest) returns (core.Order) {} - rpc NewOrderAndAuthzs(NewOrderAndAuthzsRequest) returns (core.Order) {} - rpc NewRegistration(core.Registration) returns (core.Registration) {} - rpc RevokeCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {} - rpc SetOrderError(SetOrderErrorRequest) returns (google.protobuf.Empty) {} - rpc SetOrderProcessing(OrderRequest) returns (google.protobuf.Empty) {} - rpc UpdateRegistration(core.Registration) returns (google.protobuf.Empty) {} - rpc UpdateRevokedCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {} -} - -message RegistrationID { - int64 id = 1; -} - -message 
JSONWebKey { - bytes jwk = 1; -} - -message AuthorizationID { - string id = 1; -} - -message GetPendingAuthorizationRequest { - int64 registrationID = 1; - string identifierType = 2; - string identifierValue = 3; - // Result must be valid until at least this Unix timestamp (nanos) - int64 validUntil = 4; -} - -message GetValidAuthorizationsRequest { - int64 registrationID = 1; - repeated string domains = 2; - int64 now = 3; // Unix timestamp (nanoseconds) -} - -message ValidAuthorizations { - message MapElement { - string domain = 1; - core.Authorization authz = 2; - } - repeated MapElement valid = 1; -} - -message Serial { - string serial = 1; -} - -message SerialMetadata { - string serial = 1; - int64 registrationID = 2; - int64 created = 3; // Unix timestamp (nanoseconds) - int64 expires = 4; // Unix timestamp (nanoseconds) -} - -message Range { - int64 earliest = 1; // Unix timestamp (nanoseconds) - int64 latest = 2; // Unix timestamp (nanoseconds) -} - -message Count { - int64 count = 1; -} - -message Timestamps { - repeated int64 timestamps = 1; // Unix timestamp (nanoseconds) -} - -message CountCertificatesByNamesRequest { - Range range = 1; - repeated string names = 2; -} - -message CountByNames { - map<string, int64> counts = 1; - google.protobuf.Timestamp earliest = 2; // Unix timestamp (nanoseconds) -} - -message CountRegistrationsByIPRequest { - bytes ip = 1; - Range range = 2; -} - -message CountInvalidAuthorizationsRequest { - int64 registrationID = 1; - string hostname = 2; - // Count authorizations that expire in this range. - Range range = 3; -} - -message CountOrdersRequest { - int64 accountID = 1; - Range range = 2; -} - -message CountFQDNSetsRequest { - int64 window = 1; - repeated string domains = 2; -} - -message FQDNSetExistsRequest { - repeated string domains = 1; -} - -message PreviousCertificateExistsRequest { - string domain = 1; - int64 regID = 2; -} - -message Exists { - bool exists = 1; -} - -message AddSerialRequest { - int64 regID = 1; - string serial = 2; - int64 created = 3; // Unix timestamp (nanoseconds) - int64 expires = 4; // Unix timestamp (nanoseconds) -} - -message AddCertificateRequest { - bytes der = 1; - int64 regID = 2; - // A signed OCSP response for the certificate contained in "der". - // Note: The certificate status in the OCSP response is assumed to be 0 (good). - bytes ocsp = 3; - // An issued time. When not present the SA defaults to using - // the current time.
The orphan-finder uses this parameter to add - // certificates with the correct historic issued date - int64 issued = 4; - int64 issuerID = 5; -} - -message AddCertificateResponse { - string digest = 1; -} - -message OrderRequest { - int64 id = 1; -} - -message NewOrderRequest { - int64 registrationID = 1; - int64 expires = 2; - repeated string names = 3; - repeated int64 v2Authorizations = 4; -} - -message NewOrderAndAuthzsRequest { - NewOrderRequest newOrder = 1; - repeated core.Authorization newAuthzs = 2; -} - -message SetOrderErrorRequest { - int64 id = 1; - core.ProblemDetails error = 2; -} - -message GetValidOrderAuthorizationsRequest { - int64 id = 1; - int64 acctID = 2; -} - -message GetOrderForNamesRequest { - int64 acctID = 1; - repeated string names = 2; -} - -message FinalizeOrderRequest { - int64 id = 1; - string certificateSerial = 2; -} - -message GetAuthorizationsRequest { - int64 registrationID = 1; - repeated string domains = 2; - int64 now = 3; // Unix timestamp (nanoseconds) -} - -message Authorizations { - message MapElement { - string domain = 1; - core.Authorization authz = 2; - } - repeated MapElement authz = 1; -} - -message AddPendingAuthorizationsRequest { - repeated core.Authorization authz = 1; -} - -message AuthorizationIDs { - repeated string ids = 1; -} - -message AuthorizationID2 { - int64 id = 1; -} - -message Authorization2IDs { - repeated int64 ids = 1; -} - -message RevokeCertificateRequest { - string serial = 1; - int64 reason = 2; - int64 date = 3; // Unix timestamp (nanoseconds) - int64 backdate = 5; // Unix timestamp (nanoseconds) - bytes response = 4; - int64 issuerID = 6; -} - -message FinalizeAuthorizationRequest { - int64 id = 1; - string status = 2; - int64 expires = 3; // Unix timestamp (nanoseconds) - string attempted = 4; - repeated core.ValidationRecord validationRecords = 5; - core.ProblemDetails validationError = 6; - int64 attemptedAt = 7; // Unix timestamp (nanoseconds) -} - -message AddBlockedKeyRequest { - bytes keyHash = 1; - int64 added = 2; // Unix timestamp (nanoseconds) - string source = 3; - string comment = 4; - int64 revokedBy = 5; -} - -message KeyBlockedRequest { - bytes keyHash = 1; -} - -message Incident { - int64 id = 1; - string serialTable = 2; - string url = 3; - int64 renewBy = 4; // Unix timestamp (nanoseconds) - bool enabled = 5; -} - -message Incidents { - repeated Incident incidents = 1; -} - -message SerialsForIncidentRequest { - string incidentTable = 1; -} - -message IncidentSerial { - string serial = 1; - int64 registrationID = 2; - int64 orderID = 3; - int64 lastNoticeSent = 4; // Unix timestamp (nanoseconds) -} - -message GetRevokedCertsRequest { - int64 issuerNameID = 1; - int64 expiresAfter = 2; // Unix timestamp (nanoseconds), inclusive - int64 expiresBefore = 3; // Unix timestamp (nanoseconds), exclusive - int64 revokedBefore = 4; // Unix timestamp (nanoseconds) -} - -message RevocationStatus { - int64 status = 1; - int64 revokedReason = 2; - google.protobuf.Timestamp revokedDate = 3; // Unix timestamp (nanoseconds) -} diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go b/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go deleted file mode 100644 index 37a4e026a65..00000000000 --- a/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go +++ /dev/null @@ -1,2937 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.20.1 -// source: sa.proto - -package proto - -import ( - context "context" - proto "github.com/letsencrypt/boulder/core/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// StorageAuthorityReadOnlyClient is the client API for StorageAuthorityReadOnly service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type StorageAuthorityReadOnlyClient interface { - CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error) - CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error) - CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) - CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error) - CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) - CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) - CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) - FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) - FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) - GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) - GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) - GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) - GetMaxExpiration(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*timestamppb.Timestamp, error) - GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) - GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) - GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) - GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) - GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) - GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) - GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error) - GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (StorageAuthorityReadOnly_GetRevokedCertsClient, error) - GetSerialMetadata(ctx 
context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) - GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error) - KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error) - PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error) - SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (StorageAuthorityReadOnly_SerialsForIncidentClient, error) -} - -type storageAuthorityReadOnlyClient struct { - cc grpc.ClientConnInterface -} - -func NewStorageAuthorityReadOnlyClient(cc grpc.ClientConnInterface) StorageAuthorityReadOnlyClient { - return &storageAuthorityReadOnlyClient{cc} -} - -func (c *storageAuthorityReadOnlyClient) CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error) { - out := new(CountByNames) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountCertificatesByNames", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountFQDNSets", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountInvalidAuthorizations2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountOrders", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountPendingAuthorizations2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountRegistrationsByIP", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountRegistrationsByIPRange", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) { - out := new(Exists) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/FQDNSetExists", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) { - out := new(Timestamps) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/FQDNSetTimestampsForWindow", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) { - out := new(proto.Authorization) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetAuthorization2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) { - out := new(Authorizations) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetAuthorizations2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) { - out := new(proto.Certificate) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) { - out := new(proto.CertificateStatus) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetCertificateStatus", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetMaxExpiration(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*timestamppb.Timestamp, error) { - out := new(timestamppb.Timestamp) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetMaxExpiration", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) { - out := new(proto.Order) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetOrder", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) { - out := new(proto.Order) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetOrderForNames", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) { - out := new(proto.Authorization) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetPendingAuthorization2", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) { - out := new(proto.Certificate) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetPrecertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) { - out := new(proto.Registration) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetRegistration", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) { - out := new(proto.Registration) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetRegistrationByKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error) { - out := new(RevocationStatus) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetRevocationStatus", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (StorageAuthorityReadOnly_GetRevokedCertsClient, error) { - stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[0], "/sa.StorageAuthorityReadOnly/GetRevokedCerts", opts...) - if err != nil { - return nil, err - } - x := &storageAuthorityReadOnlyGetRevokedCertsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type StorageAuthorityReadOnly_GetRevokedCertsClient interface { - Recv() (*proto.CRLEntry, error) - grpc.ClientStream -} - -type storageAuthorityReadOnlyGetRevokedCertsClient struct { - grpc.ClientStream -} - -func (x *storageAuthorityReadOnlyGetRevokedCertsClient) Recv() (*proto.CRLEntry, error) { - m := new(proto.CRLEntry) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *storageAuthorityReadOnlyClient) GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) { - out := new(SerialMetadata) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetSerialMetadata", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) { - out := new(Authorizations) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetValidAuthorizations2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) { - out := new(Authorizations) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetValidOrderAuthorizations2", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error) { - out := new(Incidents) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/IncidentsForSerial", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error) { - out := new(Exists) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/KeyBlocked", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error) { - out := new(Exists) - err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/PreviousCertificateExists", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityReadOnlyClient) SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (StorageAuthorityReadOnly_SerialsForIncidentClient, error) { - stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[1], "/sa.StorageAuthorityReadOnly/SerialsForIncident", opts...) - if err != nil { - return nil, err - } - x := &storageAuthorityReadOnlySerialsForIncidentClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type StorageAuthorityReadOnly_SerialsForIncidentClient interface { - Recv() (*IncidentSerial, error) - grpc.ClientStream -} - -type storageAuthorityReadOnlySerialsForIncidentClient struct { - grpc.ClientStream -} - -func (x *storageAuthorityReadOnlySerialsForIncidentClient) Recv() (*IncidentSerial, error) { - m := new(IncidentSerial) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// StorageAuthorityReadOnlyServer is the server API for StorageAuthorityReadOnly service. 
-// All implementations must embed UnimplementedStorageAuthorityReadOnlyServer -// for forward compatibility -type StorageAuthorityReadOnlyServer interface { - CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error) - CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error) - CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) - CountOrders(context.Context, *CountOrdersRequest) (*Count, error) - CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) - CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error) - CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error) - FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) - FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) - GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) - GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error) - GetCertificate(context.Context, *Serial) (*proto.Certificate, error) - GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) - GetMaxExpiration(context.Context, *emptypb.Empty) (*timestamppb.Timestamp, error) - GetOrder(context.Context, *OrderRequest) (*proto.Order, error) - GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) - GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error) - GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error) - GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) - GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) - GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error) - GetRevokedCerts(*GetRevokedCertsRequest, StorageAuthorityReadOnly_GetRevokedCertsServer) error - GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) - GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) - GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) - IncidentsForSerial(context.Context, *Serial) (*Incidents, error) - KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error) - PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error) - SerialsForIncident(*SerialsForIncidentRequest, StorageAuthorityReadOnly_SerialsForIncidentServer) error - mustEmbedUnimplementedStorageAuthorityReadOnlyServer() -} - -// UnimplementedStorageAuthorityReadOnlyServer must be embedded to have forward compatible implementations. 
-type UnimplementedStorageAuthorityReadOnlyServer struct { -} - -func (UnimplementedStorageAuthorityReadOnlyServer) CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountCertificatesByNames not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountFQDNSets not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountInvalidAuthorizations2 not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) CountOrders(context.Context, *CountOrdersRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountOrders not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountPendingAuthorizations2 not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountRegistrationsByIP not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountRegistrationsByIPRange not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) { - return nil, status.Errorf(codes.Unimplemented, "method FQDNSetExists not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) { - return nil, status.Errorf(codes.Unimplemented, "method FQDNSetTimestampsForWindow not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAuthorization2 not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAuthorizations2 not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetCertificate(context.Context, *Serial) (*proto.Certificate, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetCertificate not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetCertificateStatus not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetMaxExpiration(context.Context, *emptypb.Empty) (*timestamppb.Timestamp, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMaxExpiration not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetOrder(context.Context, *OrderRequest) (*proto.Order, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetOrder not 
implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetOrderForNames not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPendingAuthorization2 not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPrecertificate not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetRegistration not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetRegistrationByKey not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetRevocationStatus not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetRevokedCerts(*GetRevokedCertsRequest, StorageAuthorityReadOnly_GetRevokedCertsServer) error { - return status.Errorf(codes.Unimplemented, "method GetRevokedCerts not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSerialMetadata not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetValidAuthorizations2 not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetValidOrderAuthorizations2 not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) IncidentsForSerial(context.Context, *Serial) (*Incidents, error) { - return nil, status.Errorf(codes.Unimplemented, "method IncidentsForSerial not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error) { - return nil, status.Errorf(codes.Unimplemented, "method KeyBlocked not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error) { - return nil, status.Errorf(codes.Unimplemented, "method PreviousCertificateExists not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) SerialsForIncident(*SerialsForIncidentRequest, StorageAuthorityReadOnly_SerialsForIncidentServer) error { - return status.Errorf(codes.Unimplemented, "method SerialsForIncident not implemented") -} -func (UnimplementedStorageAuthorityReadOnlyServer) mustEmbedUnimplementedStorageAuthorityReadOnlyServer() { -} - -// UnsafeStorageAuthorityReadOnlyServer may be embedded to opt out of forward compatibility for this service. 
-// Use of this interface is not recommended, as added methods to StorageAuthorityReadOnlyServer will -// result in compilation errors. -type UnsafeStorageAuthorityReadOnlyServer interface { - mustEmbedUnimplementedStorageAuthorityReadOnlyServer() -} - -func RegisterStorageAuthorityReadOnlyServer(s grpc.ServiceRegistrar, srv StorageAuthorityReadOnlyServer) { - s.RegisterService(&StorageAuthorityReadOnly_ServiceDesc, srv) -} - -func _StorageAuthorityReadOnly_CountCertificatesByNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountCertificatesByNamesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).CountCertificatesByNames(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/CountCertificatesByNames", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).CountCertificatesByNames(ctx, req.(*CountCertificatesByNamesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_CountFQDNSets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountFQDNSetsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).CountFQDNSets(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/CountFQDNSets", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).CountFQDNSets(ctx, req.(*CountFQDNSetsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_CountInvalidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountInvalidAuthorizationsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).CountInvalidAuthorizations2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/CountInvalidAuthorizations2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).CountInvalidAuthorizations2(ctx, req.(*CountInvalidAuthorizationsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_CountOrders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountOrdersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).CountOrders(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/CountOrders", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).CountOrders(ctx, req.(*CountOrdersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_CountPendingAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RegistrationID) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).CountPendingAuthorizations2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/CountPendingAuthorizations2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).CountPendingAuthorizations2(ctx, req.(*RegistrationID)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_CountRegistrationsByIP_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountRegistrationsByIPRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).CountRegistrationsByIP(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/CountRegistrationsByIP", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).CountRegistrationsByIP(ctx, req.(*CountRegistrationsByIPRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_CountRegistrationsByIPRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountRegistrationsByIPRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).CountRegistrationsByIPRange(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/CountRegistrationsByIPRange", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).CountRegistrationsByIPRange(ctx, req.(*CountRegistrationsByIPRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_FQDNSetExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FQDNSetExistsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).FQDNSetExists(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/FQDNSetExists", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).FQDNSetExists(ctx, req.(*FQDNSetExistsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_FQDNSetTimestampsForWindow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountFQDNSetsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).FQDNSetTimestampsForWindow(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/FQDNSetTimestampsForWindow", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(StorageAuthorityReadOnlyServer).FQDNSetTimestampsForWindow(ctx, req.(*CountFQDNSetsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthorizationID2) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetAuthorization2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetAuthorization2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetAuthorization2(ctx, req.(*AuthorizationID2)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAuthorizationsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetAuthorizations2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetAuthorizations2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetAuthorizations2(ctx, req.(*GetAuthorizationsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Serial) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetCertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetCertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetCertificate(ctx, req.(*Serial)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetCertificateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Serial) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetCertificateStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetCertificateStatus", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetCertificateStatus(ctx, req.(*Serial)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetMaxExpiration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetMaxExpiration(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetMaxExpiration", - } - handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetMaxExpiration(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(OrderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetOrder(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetOrder", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetOrder(ctx, req.(*OrderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetOrderForNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetOrderForNamesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetOrderForNames(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetOrderForNames", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetOrderForNames(ctx, req.(*GetOrderForNamesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetPendingAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPendingAuthorizationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetPendingAuthorization2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetPendingAuthorization2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetPendingAuthorization2(ctx, req.(*GetPendingAuthorizationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Serial) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetPrecertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetPrecertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetPrecertificate(ctx, req.(*Serial)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RegistrationID) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetRegistration(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetRegistration", - } - 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetRegistration(ctx, req.(*RegistrationID)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetRegistrationByKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(JSONWebKey) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetRegistrationByKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetRegistrationByKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetRegistrationByKey(ctx, req.(*JSONWebKey)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetRevocationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Serial) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetRevocationStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetRevocationStatus", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetRevocationStatus(ctx, req.(*Serial)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetRevokedCerts_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(GetRevokedCertsRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(StorageAuthorityReadOnlyServer).GetRevokedCerts(m, &storageAuthorityReadOnlyGetRevokedCertsServer{stream}) -} - -type StorageAuthorityReadOnly_GetRevokedCertsServer interface { - Send(*proto.CRLEntry) error - grpc.ServerStream -} - -type storageAuthorityReadOnlyGetRevokedCertsServer struct { - grpc.ServerStream -} - -func (x *storageAuthorityReadOnlyGetRevokedCertsServer) Send(m *proto.CRLEntry) error { - return x.ServerStream.SendMsg(m) -} - -func _StorageAuthorityReadOnly_GetSerialMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Serial) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetSerialMetadata(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetSerialMetadata", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetSerialMetadata(ctx, req.(*Serial)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetValidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetValidAuthorizationsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetValidAuthorizations2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetValidAuthorizations2", - } - handler := 
func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetValidAuthorizations2(ctx, req.(*GetValidAuthorizationsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_GetValidOrderAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetValidOrderAuthorizationsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).GetValidOrderAuthorizations2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/GetValidOrderAuthorizations2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).GetValidOrderAuthorizations2(ctx, req.(*GetValidOrderAuthorizationsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_IncidentsForSerial_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Serial) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).IncidentsForSerial(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/IncidentsForSerial", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).IncidentsForSerial(ctx, req.(*Serial)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_KeyBlocked_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(KeyBlockedRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).KeyBlocked(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/KeyBlocked", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).KeyBlocked(ctx, req.(*KeyBlockedRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_PreviousCertificateExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PreviousCertificateExistsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityReadOnlyServer).PreviousCertificateExists(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthorityReadOnly/PreviousCertificateExists", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityReadOnlyServer).PreviousCertificateExists(ctx, req.(*PreviousCertificateExistsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthorityReadOnly_SerialsForIncident_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SerialsForIncidentRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(StorageAuthorityReadOnlyServer).SerialsForIncident(m, &storageAuthorityReadOnlySerialsForIncidentServer{stream}) 
-} - -type StorageAuthorityReadOnly_SerialsForIncidentServer interface { - Send(*IncidentSerial) error - grpc.ServerStream -} - -type storageAuthorityReadOnlySerialsForIncidentServer struct { - grpc.ServerStream -} - -func (x *storageAuthorityReadOnlySerialsForIncidentServer) Send(m *IncidentSerial) error { - return x.ServerStream.SendMsg(m) -} - -// StorageAuthorityReadOnly_ServiceDesc is the grpc.ServiceDesc for StorageAuthorityReadOnly service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var StorageAuthorityReadOnly_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "sa.StorageAuthorityReadOnly", - HandlerType: (*StorageAuthorityReadOnlyServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CountCertificatesByNames", - Handler: _StorageAuthorityReadOnly_CountCertificatesByNames_Handler, - }, - { - MethodName: "CountFQDNSets", - Handler: _StorageAuthorityReadOnly_CountFQDNSets_Handler, - }, - { - MethodName: "CountInvalidAuthorizations2", - Handler: _StorageAuthorityReadOnly_CountInvalidAuthorizations2_Handler, - }, - { - MethodName: "CountOrders", - Handler: _StorageAuthorityReadOnly_CountOrders_Handler, - }, - { - MethodName: "CountPendingAuthorizations2", - Handler: _StorageAuthorityReadOnly_CountPendingAuthorizations2_Handler, - }, - { - MethodName: "CountRegistrationsByIP", - Handler: _StorageAuthorityReadOnly_CountRegistrationsByIP_Handler, - }, - { - MethodName: "CountRegistrationsByIPRange", - Handler: _StorageAuthorityReadOnly_CountRegistrationsByIPRange_Handler, - }, - { - MethodName: "FQDNSetExists", - Handler: _StorageAuthorityReadOnly_FQDNSetExists_Handler, - }, - { - MethodName: "FQDNSetTimestampsForWindow", - Handler: _StorageAuthorityReadOnly_FQDNSetTimestampsForWindow_Handler, - }, - { - MethodName: "GetAuthorization2", - Handler: _StorageAuthorityReadOnly_GetAuthorization2_Handler, - }, - { - MethodName: "GetAuthorizations2", - Handler: _StorageAuthorityReadOnly_GetAuthorizations2_Handler, - }, - { - MethodName: "GetCertificate", - Handler: _StorageAuthorityReadOnly_GetCertificate_Handler, - }, - { - MethodName: "GetCertificateStatus", - Handler: _StorageAuthorityReadOnly_GetCertificateStatus_Handler, - }, - { - MethodName: "GetMaxExpiration", - Handler: _StorageAuthorityReadOnly_GetMaxExpiration_Handler, - }, - { - MethodName: "GetOrder", - Handler: _StorageAuthorityReadOnly_GetOrder_Handler, - }, - { - MethodName: "GetOrderForNames", - Handler: _StorageAuthorityReadOnly_GetOrderForNames_Handler, - }, - { - MethodName: "GetPendingAuthorization2", - Handler: _StorageAuthorityReadOnly_GetPendingAuthorization2_Handler, - }, - { - MethodName: "GetPrecertificate", - Handler: _StorageAuthorityReadOnly_GetPrecertificate_Handler, - }, - { - MethodName: "GetRegistration", - Handler: _StorageAuthorityReadOnly_GetRegistration_Handler, - }, - { - MethodName: "GetRegistrationByKey", - Handler: _StorageAuthorityReadOnly_GetRegistrationByKey_Handler, - }, - { - MethodName: "GetRevocationStatus", - Handler: _StorageAuthorityReadOnly_GetRevocationStatus_Handler, - }, - { - MethodName: "GetSerialMetadata", - Handler: _StorageAuthorityReadOnly_GetSerialMetadata_Handler, - }, - { - MethodName: "GetValidAuthorizations2", - Handler: _StorageAuthorityReadOnly_GetValidAuthorizations2_Handler, - }, - { - MethodName: "GetValidOrderAuthorizations2", - Handler: _StorageAuthorityReadOnly_GetValidOrderAuthorizations2_Handler, - }, - { - MethodName: "IncidentsForSerial", - Handler: 
_StorageAuthorityReadOnly_IncidentsForSerial_Handler, - }, - { - MethodName: "KeyBlocked", - Handler: _StorageAuthorityReadOnly_KeyBlocked_Handler, - }, - { - MethodName: "PreviousCertificateExists", - Handler: _StorageAuthorityReadOnly_PreviousCertificateExists_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "GetRevokedCerts", - Handler: _StorageAuthorityReadOnly_GetRevokedCerts_Handler, - ServerStreams: true, - }, - { - StreamName: "SerialsForIncident", - Handler: _StorageAuthorityReadOnly_SerialsForIncident_Handler, - ServerStreams: true, - }, - }, - Metadata: "sa.proto", -} - -// StorageAuthorityClient is the client API for StorageAuthority service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type StorageAuthorityClient interface { - // Getters: this list must be identical to the StorageAuthorityReadOnly rpcs. - CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error) - CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error) - CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) - CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error) - CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) - CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) - CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) - FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) - FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) - GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) - GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) - GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) - GetMaxExpiration(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*timestamppb.Timestamp, error) - GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) - GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) - GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) - GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) - GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) - GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) - GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error) - GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (StorageAuthority_GetRevokedCertsClient, error) - GetSerialMetadata(ctx 
context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) - GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error) - KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error) - PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error) - SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (StorageAuthority_SerialsForIncidentClient, error) - // Adders - AddBlockedKey(ctx context.Context, in *AddBlockedKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error) - AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - DeactivateAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*emptypb.Empty, error) - DeactivateRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*emptypb.Empty, error) - FinalizeAuthorization2(ctx context.Context, in *FinalizeAuthorizationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - NewAuthorizations2(ctx context.Context, in *AddPendingAuthorizationsRequest, opts ...grpc.CallOption) (*Authorization2IDs, error) - NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) - NewOrderAndAuthzs(ctx context.Context, in *NewOrderAndAuthzsRequest, opts ...grpc.CallOption) (*proto.Order, error) - NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error) - RevokeCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - SetOrderError(ctx context.Context, in *SetOrderErrorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - SetOrderProcessing(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - UpdateRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*emptypb.Empty, error) - UpdateRevokedCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) -} - -type storageAuthorityClient struct { - cc grpc.ClientConnInterface -} - -func NewStorageAuthorityClient(cc grpc.ClientConnInterface) StorageAuthorityClient { - return &storageAuthorityClient{cc} -} - -func (c *storageAuthorityClient) CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error) { - out := new(CountByNames) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountCertificatesByNames", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountFQDNSets", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountInvalidAuthorizations2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountOrders", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountPendingAuthorizations2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountRegistrationsByIP", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountRegistrationsByIPRange", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) { - out := new(Exists) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FQDNSetExists", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) { - out := new(Timestamps) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FQDNSetTimestampsForWindow", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) { - out := new(proto.Authorization) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetAuthorization2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) { - out := new(Authorizations) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetAuthorizations2", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) { - out := new(proto.Certificate) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) { - out := new(proto.CertificateStatus) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetCertificateStatus", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetMaxExpiration(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*timestamppb.Timestamp, error) { - out := new(timestamppb.Timestamp) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetMaxExpiration", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) { - out := new(proto.Order) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetOrder", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) { - out := new(proto.Order) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetOrderForNames", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) { - out := new(proto.Authorization) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetPendingAuthorization2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) { - out := new(proto.Certificate) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetPrecertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) { - out := new(proto.Registration) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetRegistration", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) { - out := new(proto.Registration) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetRegistrationByKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error) { - out := new(RevocationStatus) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetRevocationStatus", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (StorageAuthority_GetRevokedCertsClient, error) { - stream, err := c.cc.NewStream(ctx, &StorageAuthority_ServiceDesc.Streams[0], "/sa.StorageAuthority/GetRevokedCerts", opts...) - if err != nil { - return nil, err - } - x := &storageAuthorityGetRevokedCertsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type StorageAuthority_GetRevokedCertsClient interface { - Recv() (*proto.CRLEntry, error) - grpc.ClientStream -} - -type storageAuthorityGetRevokedCertsClient struct { - grpc.ClientStream -} - -func (x *storageAuthorityGetRevokedCertsClient) Recv() (*proto.CRLEntry, error) { - m := new(proto.CRLEntry) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *storageAuthorityClient) GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) { - out := new(SerialMetadata) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetSerialMetadata", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) { - out := new(Authorizations) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetValidAuthorizations2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) { - out := new(Authorizations) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetValidOrderAuthorizations2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error) { - out := new(Incidents) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/IncidentsForSerial", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error) { - out := new(Exists) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/KeyBlocked", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error) { - out := new(Exists) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/PreviousCertificateExists", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (StorageAuthority_SerialsForIncidentClient, error) { - stream, err := c.cc.NewStream(ctx, &StorageAuthority_ServiceDesc.Streams[1], "/sa.StorageAuthority/SerialsForIncident", opts...) 
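[Editor's aside, not part of the vendored file: GetRevokedCerts and SerialsForIncident are server-streaming RPCs, so a caller reads from the returned stream until io.EOF. A minimal sketch, assuming the same placeholder import alias as in the previous aside.]

import (
	"context"
	"io"
	"log"

	sapb "github.com/letsencrypt/boulder/sa/proto" // assumed path
)

func drainRevokedCerts(ctx context.Context, sac sapb.StorageAuthorityClient) error {
	// Request fields are left at their zero values purely for illustration.
	stream, err := sac.GetRevokedCerts(ctx, &sapb.GetRevokedCertsRequest{})
	if err != nil {
		return err
	}
	for {
		entry, err := stream.Recv()
		if err == io.EOF {
			return nil // server closed the stream cleanly
		}
		if err != nil {
			return err
		}
		log.Printf("revoked serial %s", entry.Serial)
	}
}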
- if err != nil { - return nil, err - } - x := &storageAuthoritySerialsForIncidentClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type StorageAuthority_SerialsForIncidentClient interface { - Recv() (*IncidentSerial, error) - grpc.ClientStream -} - -type storageAuthoritySerialsForIncidentClient struct { - grpc.ClientStream -} - -func (x *storageAuthoritySerialsForIncidentClient) Recv() (*IncidentSerial, error) { - m := new(IncidentSerial) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *storageAuthorityClient) AddBlockedKey(ctx context.Context, in *AddBlockedKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddBlockedKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error) { - out := new(AddCertificateResponse) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddPrecertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddSerial", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) DeactivateAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/DeactivateAuthorization2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) DeactivateRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/DeactivateRegistration", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) FinalizeAuthorization2(ctx context.Context, in *FinalizeAuthorizationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FinalizeAuthorization2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FinalizeOrder", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) NewAuthorizations2(ctx context.Context, in *AddPendingAuthorizationsRequest, opts ...grpc.CallOption) (*Authorization2IDs, error) { - out := new(Authorization2IDs) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewAuthorizations2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) { - out := new(proto.Order) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewOrder", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) NewOrderAndAuthzs(ctx context.Context, in *NewOrderAndAuthzsRequest, opts ...grpc.CallOption) (*proto.Order, error) { - out := new(proto.Order) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewOrderAndAuthzs", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error) { - out := new(proto.Registration) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewRegistration", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) RevokeCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/RevokeCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) SetOrderError(ctx context.Context, in *SetOrderErrorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/SetOrderError", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) SetOrderProcessing(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/SetOrderProcessing", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) UpdateRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/UpdateRegistration", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageAuthorityClient) UpdateRevokedCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/UpdateRevokedCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// StorageAuthorityServer is the server API for StorageAuthority service. -// All implementations must embed UnimplementedStorageAuthorityServer -// for forward compatibility -type StorageAuthorityServer interface { - // Getters: this list must be identical to the StorageAuthorityReadOnly rpcs. 
- CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error) - CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error) - CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) - CountOrders(context.Context, *CountOrdersRequest) (*Count, error) - CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) - CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error) - CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error) - FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) - FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) - GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) - GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error) - GetCertificate(context.Context, *Serial) (*proto.Certificate, error) - GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) - GetMaxExpiration(context.Context, *emptypb.Empty) (*timestamppb.Timestamp, error) - GetOrder(context.Context, *OrderRequest) (*proto.Order, error) - GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) - GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error) - GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error) - GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) - GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) - GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error) - GetRevokedCerts(*GetRevokedCertsRequest, StorageAuthority_GetRevokedCertsServer) error - GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) - GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) - GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) - IncidentsForSerial(context.Context, *Serial) (*Incidents, error) - KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error) - PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error) - SerialsForIncident(*SerialsForIncidentRequest, StorageAuthority_SerialsForIncidentServer) error - // Adders - AddBlockedKey(context.Context, *AddBlockedKeyRequest) (*emptypb.Empty, error) - AddCertificate(context.Context, *AddCertificateRequest) (*AddCertificateResponse, error) - AddPrecertificate(context.Context, *AddCertificateRequest) (*emptypb.Empty, error) - AddSerial(context.Context, *AddSerialRequest) (*emptypb.Empty, error) - DeactivateAuthorization2(context.Context, *AuthorizationID2) (*emptypb.Empty, error) - DeactivateRegistration(context.Context, *RegistrationID) (*emptypb.Empty, error) - FinalizeAuthorization2(context.Context, *FinalizeAuthorizationRequest) (*emptypb.Empty, error) - FinalizeOrder(context.Context, *FinalizeOrderRequest) (*emptypb.Empty, error) - NewAuthorizations2(context.Context, *AddPendingAuthorizationsRequest) (*Authorization2IDs, error) - NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error) - NewOrderAndAuthzs(context.Context, *NewOrderAndAuthzsRequest) (*proto.Order, error) - NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) - RevokeCertificate(context.Context, *RevokeCertificateRequest) 
(*emptypb.Empty, error) - SetOrderError(context.Context, *SetOrderErrorRequest) (*emptypb.Empty, error) - SetOrderProcessing(context.Context, *OrderRequest) (*emptypb.Empty, error) - UpdateRegistration(context.Context, *proto.Registration) (*emptypb.Empty, error) - UpdateRevokedCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error) - mustEmbedUnimplementedStorageAuthorityServer() -} - -// UnimplementedStorageAuthorityServer must be embedded to have forward compatible implementations. -type UnimplementedStorageAuthorityServer struct { -} - -func (UnimplementedStorageAuthorityServer) CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountCertificatesByNames not implemented") -} -func (UnimplementedStorageAuthorityServer) CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountFQDNSets not implemented") -} -func (UnimplementedStorageAuthorityServer) CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountInvalidAuthorizations2 not implemented") -} -func (UnimplementedStorageAuthorityServer) CountOrders(context.Context, *CountOrdersRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountOrders not implemented") -} -func (UnimplementedStorageAuthorityServer) CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountPendingAuthorizations2 not implemented") -} -func (UnimplementedStorageAuthorityServer) CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountRegistrationsByIP not implemented") -} -func (UnimplementedStorageAuthorityServer) CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountRegistrationsByIPRange not implemented") -} -func (UnimplementedStorageAuthorityServer) FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) { - return nil, status.Errorf(codes.Unimplemented, "method FQDNSetExists not implemented") -} -func (UnimplementedStorageAuthorityServer) FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) { - return nil, status.Errorf(codes.Unimplemented, "method FQDNSetTimestampsForWindow not implemented") -} -func (UnimplementedStorageAuthorityServer) GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAuthorization2 not implemented") -} -func (UnimplementedStorageAuthorityServer) GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAuthorizations2 not implemented") -} -func (UnimplementedStorageAuthorityServer) GetCertificate(context.Context, *Serial) (*proto.Certificate, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetCertificate not implemented") -} -func (UnimplementedStorageAuthorityServer) GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetCertificateStatus not implemented") -} -func 
(UnimplementedStorageAuthorityServer) GetMaxExpiration(context.Context, *emptypb.Empty) (*timestamppb.Timestamp, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMaxExpiration not implemented") -} -func (UnimplementedStorageAuthorityServer) GetOrder(context.Context, *OrderRequest) (*proto.Order, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetOrder not implemented") -} -func (UnimplementedStorageAuthorityServer) GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetOrderForNames not implemented") -} -func (UnimplementedStorageAuthorityServer) GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPendingAuthorization2 not implemented") -} -func (UnimplementedStorageAuthorityServer) GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPrecertificate not implemented") -} -func (UnimplementedStorageAuthorityServer) GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetRegistration not implemented") -} -func (UnimplementedStorageAuthorityServer) GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetRegistrationByKey not implemented") -} -func (UnimplementedStorageAuthorityServer) GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetRevocationStatus not implemented") -} -func (UnimplementedStorageAuthorityServer) GetRevokedCerts(*GetRevokedCertsRequest, StorageAuthority_GetRevokedCertsServer) error { - return status.Errorf(codes.Unimplemented, "method GetRevokedCerts not implemented") -} -func (UnimplementedStorageAuthorityServer) GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSerialMetadata not implemented") -} -func (UnimplementedStorageAuthorityServer) GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetValidAuthorizations2 not implemented") -} -func (UnimplementedStorageAuthorityServer) GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetValidOrderAuthorizations2 not implemented") -} -func (UnimplementedStorageAuthorityServer) IncidentsForSerial(context.Context, *Serial) (*Incidents, error) { - return nil, status.Errorf(codes.Unimplemented, "method IncidentsForSerial not implemented") -} -func (UnimplementedStorageAuthorityServer) KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error) { - return nil, status.Errorf(codes.Unimplemented, "method KeyBlocked not implemented") -} -func (UnimplementedStorageAuthorityServer) PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error) { - return nil, status.Errorf(codes.Unimplemented, "method PreviousCertificateExists not implemented") -} -func (UnimplementedStorageAuthorityServer) SerialsForIncident(*SerialsForIncidentRequest, StorageAuthority_SerialsForIncidentServer) error { - return status.Errorf(codes.Unimplemented, "method 
SerialsForIncident not implemented") -} -func (UnimplementedStorageAuthorityServer) AddBlockedKey(context.Context, *AddBlockedKeyRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddBlockedKey not implemented") -} -func (UnimplementedStorageAuthorityServer) AddCertificate(context.Context, *AddCertificateRequest) (*AddCertificateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddCertificate not implemented") -} -func (UnimplementedStorageAuthorityServer) AddPrecertificate(context.Context, *AddCertificateRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddPrecertificate not implemented") -} -func (UnimplementedStorageAuthorityServer) AddSerial(context.Context, *AddSerialRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddSerial not implemented") -} -func (UnimplementedStorageAuthorityServer) DeactivateAuthorization2(context.Context, *AuthorizationID2) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeactivateAuthorization2 not implemented") -} -func (UnimplementedStorageAuthorityServer) DeactivateRegistration(context.Context, *RegistrationID) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeactivateRegistration not implemented") -} -func (UnimplementedStorageAuthorityServer) FinalizeAuthorization2(context.Context, *FinalizeAuthorizationRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method FinalizeAuthorization2 not implemented") -} -func (UnimplementedStorageAuthorityServer) FinalizeOrder(context.Context, *FinalizeOrderRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method FinalizeOrder not implemented") -} -func (UnimplementedStorageAuthorityServer) NewAuthorizations2(context.Context, *AddPendingAuthorizationsRequest) (*Authorization2IDs, error) { - return nil, status.Errorf(codes.Unimplemented, "method NewAuthorizations2 not implemented") -} -func (UnimplementedStorageAuthorityServer) NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error) { - return nil, status.Errorf(codes.Unimplemented, "method NewOrder not implemented") -} -func (UnimplementedStorageAuthorityServer) NewOrderAndAuthzs(context.Context, *NewOrderAndAuthzsRequest) (*proto.Order, error) { - return nil, status.Errorf(codes.Unimplemented, "method NewOrderAndAuthzs not implemented") -} -func (UnimplementedStorageAuthorityServer) NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) { - return nil, status.Errorf(codes.Unimplemented, "method NewRegistration not implemented") -} -func (UnimplementedStorageAuthorityServer) RevokeCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method RevokeCertificate not implemented") -} -func (UnimplementedStorageAuthorityServer) SetOrderError(context.Context, *SetOrderErrorRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetOrderError not implemented") -} -func (UnimplementedStorageAuthorityServer) SetOrderProcessing(context.Context, *OrderRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetOrderProcessing not implemented") -} -func (UnimplementedStorageAuthorityServer) UpdateRegistration(context.Context, *proto.Registration) (*emptypb.Empty, error) { - return nil, 
status.Errorf(codes.Unimplemented, "method UpdateRegistration not implemented") -} -func (UnimplementedStorageAuthorityServer) UpdateRevokedCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateRevokedCertificate not implemented") -} -func (UnimplementedStorageAuthorityServer) mustEmbedUnimplementedStorageAuthorityServer() {} - -// UnsafeStorageAuthorityServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to StorageAuthorityServer will -// result in compilation errors. -type UnsafeStorageAuthorityServer interface { - mustEmbedUnimplementedStorageAuthorityServer() -} - -func RegisterStorageAuthorityServer(s grpc.ServiceRegistrar, srv StorageAuthorityServer) { - s.RegisterService(&StorageAuthority_ServiceDesc, srv) -} - -func _StorageAuthority_CountCertificatesByNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountCertificatesByNamesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).CountCertificatesByNames(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/CountCertificatesByNames", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountCertificatesByNames(ctx, req.(*CountCertificatesByNamesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_CountFQDNSets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountFQDNSetsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).CountFQDNSets(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/CountFQDNSets", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountFQDNSets(ctx, req.(*CountFQDNSetsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_CountInvalidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountInvalidAuthorizationsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).CountInvalidAuthorizations2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/CountInvalidAuthorizations2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountInvalidAuthorizations2(ctx, req.(*CountInvalidAuthorizationsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_CountOrders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountOrdersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).CountOrders(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/CountOrders", - } - 
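[Editor's aside, not part of the vendored file: on the server side, implementations embed UnimplementedStorageAuthorityServer, as the generated comments above require, override only the methods they provide, and register with RegisterStorageAuthorityServer. The type name, port, and stub response below are hypothetical.]

import (
	"context"
	"net"

	"google.golang.org/grpc"

	sapb "github.com/letsencrypt/boulder/sa/proto" // assumed path
)

// stubSA satisfies StorageAuthorityServer; any method it does not override
// falls through to the embedded stub and returns codes.Unimplemented.
type stubSA struct {
	sapb.UnimplementedStorageAuthorityServer
}

func (s *stubSA) CountOrders(ctx context.Context, req *sapb.CountOrdersRequest) (*sapb.Count, error) {
	return &sapb.Count{Count: 0}, nil // hypothetical stub response
}

func serve() error {
	lis, err := net.Listen("tcp", ":9095") // placeholder port
	if err != nil {
		return err
	}
	gs := grpc.NewServer()
	sapb.RegisterStorageAuthorityServer(gs, &stubSA{})
	return gs.Serve(lis)
}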
handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountOrders(ctx, req.(*CountOrdersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_CountPendingAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RegistrationID) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).CountPendingAuthorizations2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/CountPendingAuthorizations2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountPendingAuthorizations2(ctx, req.(*RegistrationID)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_CountRegistrationsByIP_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountRegistrationsByIPRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).CountRegistrationsByIP(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/CountRegistrationsByIP", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountRegistrationsByIP(ctx, req.(*CountRegistrationsByIPRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_CountRegistrationsByIPRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountRegistrationsByIPRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).CountRegistrationsByIPRange(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/CountRegistrationsByIPRange", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountRegistrationsByIPRange(ctx, req.(*CountRegistrationsByIPRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_FQDNSetExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FQDNSetExistsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).FQDNSetExists(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/FQDNSetExists", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).FQDNSetExists(ctx, req.(*FQDNSetExistsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_FQDNSetTimestampsForWindow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountFQDNSetsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).FQDNSetTimestampsForWindow(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/sa.StorageAuthority/FQDNSetTimestampsForWindow", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).FQDNSetTimestampsForWindow(ctx, req.(*CountFQDNSetsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthorizationID2) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetAuthorization2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetAuthorization2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetAuthorization2(ctx, req.(*AuthorizationID2)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAuthorizationsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetAuthorizations2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetAuthorizations2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetAuthorizations2(ctx, req.(*GetAuthorizationsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Serial) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetCertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetCertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetCertificate(ctx, req.(*Serial)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetCertificateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Serial) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetCertificateStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetCertificateStatus", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetCertificateStatus(ctx, req.(*Serial)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetMaxExpiration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetMaxExpiration(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetMaxExpiration", - } - handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { - return srv.(StorageAuthorityServer).GetMaxExpiration(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(OrderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetOrder(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetOrder", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetOrder(ctx, req.(*OrderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetOrderForNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetOrderForNamesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetOrderForNames(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetOrderForNames", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetOrderForNames(ctx, req.(*GetOrderForNamesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetPendingAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPendingAuthorizationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetPendingAuthorization2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetPendingAuthorization2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetPendingAuthorization2(ctx, req.(*GetPendingAuthorizationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Serial) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetPrecertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetPrecertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetPrecertificate(ctx, req.(*Serial)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RegistrationID) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetRegistration(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetRegistration", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetRegistration(ctx, req.(*RegistrationID)) - } - return 
interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetRegistrationByKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(JSONWebKey) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetRegistrationByKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetRegistrationByKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetRegistrationByKey(ctx, req.(*JSONWebKey)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetRevocationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Serial) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetRevocationStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetRevocationStatus", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetRevocationStatus(ctx, req.(*Serial)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetRevokedCerts_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(GetRevokedCertsRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(StorageAuthorityServer).GetRevokedCerts(m, &storageAuthorityGetRevokedCertsServer{stream}) -} - -type StorageAuthority_GetRevokedCertsServer interface { - Send(*proto.CRLEntry) error - grpc.ServerStream -} - -type storageAuthorityGetRevokedCertsServer struct { - grpc.ServerStream -} - -func (x *storageAuthorityGetRevokedCertsServer) Send(m *proto.CRLEntry) error { - return x.ServerStream.SendMsg(m) -} - -func _StorageAuthority_GetSerialMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Serial) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetSerialMetadata(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetSerialMetadata", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetSerialMetadata(ctx, req.(*Serial)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetValidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetValidAuthorizationsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetValidAuthorizations2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetValidAuthorizations2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetValidAuthorizations2(ctx, req.(*GetValidAuthorizationsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_GetValidOrderAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetValidOrderAuthorizationsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).GetValidOrderAuthorizations2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/GetValidOrderAuthorizations2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetValidOrderAuthorizations2(ctx, req.(*GetValidOrderAuthorizationsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_IncidentsForSerial_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Serial) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).IncidentsForSerial(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/IncidentsForSerial", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).IncidentsForSerial(ctx, req.(*Serial)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_KeyBlocked_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(KeyBlockedRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).KeyBlocked(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/KeyBlocked", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).KeyBlocked(ctx, req.(*KeyBlockedRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_PreviousCertificateExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PreviousCertificateExistsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).PreviousCertificateExists(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/PreviousCertificateExists", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).PreviousCertificateExists(ctx, req.(*PreviousCertificateExistsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_SerialsForIncident_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SerialsForIncidentRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(StorageAuthorityServer).SerialsForIncident(m, &storageAuthoritySerialsForIncidentServer{stream}) -} - -type StorageAuthority_SerialsForIncidentServer interface { - Send(*IncidentSerial) error - grpc.ServerStream -} - -type storageAuthoritySerialsForIncidentServer struct { - grpc.ServerStream -} - -func (x *storageAuthoritySerialsForIncidentServer) Send(m *IncidentSerial) error { - return x.ServerStream.SendMsg(m) -} - -func _StorageAuthority_AddBlockedKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - 
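[Editor's aside, not part of the vendored file: each generated unary handler above accepts an optional grpc.UnaryServerInterceptor and routes the request through it when one is configured on the server. A minimal logging interceptor, reusing the hypothetical stubSA from the previous aside, might look like this.]

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func loggingInterceptor(ctx context.Context, req interface{},
	info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	// info.FullMethod carries the "/sa.StorageAuthority/..." names seen above.
	log.Printf("%s took %s err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

// Installed once, it wraps every unary method in the ServiceDesc:
//   gs := grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
//   sapb.RegisterStorageAuthorityServer(gs, &stubSA{})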
in := new(AddBlockedKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).AddBlockedKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/AddBlockedKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).AddBlockedKey(ctx, req.(*AddBlockedKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_AddCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddCertificateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).AddCertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/AddCertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).AddCertificate(ctx, req.(*AddCertificateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_AddPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddCertificateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).AddPrecertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/AddPrecertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).AddPrecertificate(ctx, req.(*AddCertificateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_AddSerial_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddSerialRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).AddSerial(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/AddSerial", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).AddSerial(ctx, req.(*AddSerialRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_DeactivateAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthorizationID2) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).DeactivateAuthorization2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/DeactivateAuthorization2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).DeactivateAuthorization2(ctx, req.(*AuthorizationID2)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_DeactivateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RegistrationID) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == 
nil { - return srv.(StorageAuthorityServer).DeactivateRegistration(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/DeactivateRegistration", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).DeactivateRegistration(ctx, req.(*RegistrationID)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_FinalizeAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FinalizeAuthorizationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).FinalizeAuthorization2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/FinalizeAuthorization2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).FinalizeAuthorization2(ctx, req.(*FinalizeAuthorizationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_FinalizeOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FinalizeOrderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).FinalizeOrder(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/FinalizeOrder", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).FinalizeOrder(ctx, req.(*FinalizeOrderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_NewAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddPendingAuthorizationsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).NewAuthorizations2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/NewAuthorizations2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).NewAuthorizations2(ctx, req.(*AddPendingAuthorizationsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_NewOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NewOrderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).NewOrder(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/NewOrder", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).NewOrder(ctx, req.(*NewOrderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_NewOrderAndAuthzs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NewOrderAndAuthzsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(StorageAuthorityServer).NewOrderAndAuthzs(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/NewOrderAndAuthzs", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).NewOrderAndAuthzs(ctx, req.(*NewOrderAndAuthzsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_NewRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(proto.Registration) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).NewRegistration(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/NewRegistration", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).NewRegistration(ctx, req.(*proto.Registration)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RevokeCertificateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).RevokeCertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/RevokeCertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).RevokeCertificate(ctx, req.(*RevokeCertificateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_SetOrderError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetOrderErrorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).SetOrderError(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/SetOrderError", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).SetOrderError(ctx, req.(*SetOrderErrorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_SetOrderProcessing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(OrderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).SetOrderProcessing(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/SetOrderProcessing", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).SetOrderProcessing(ctx, req.(*OrderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_UpdateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(proto.Registration) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).UpdateRegistration(ctx, in) - } - info := &grpc.UnaryServerInfo{ - 
Server: srv, - FullMethod: "/sa.StorageAuthority/UpdateRegistration", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).UpdateRegistration(ctx, req.(*proto.Registration)) - } - return interceptor(ctx, in, info, handler) -} - -func _StorageAuthority_UpdateRevokedCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RevokeCertificateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageAuthorityServer).UpdateRevokedCertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/sa.StorageAuthority/UpdateRevokedCertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).UpdateRevokedCertificate(ctx, req.(*RevokeCertificateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// StorageAuthority_ServiceDesc is the grpc.ServiceDesc for StorageAuthority service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var StorageAuthority_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "sa.StorageAuthority", - HandlerType: (*StorageAuthorityServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CountCertificatesByNames", - Handler: _StorageAuthority_CountCertificatesByNames_Handler, - }, - { - MethodName: "CountFQDNSets", - Handler: _StorageAuthority_CountFQDNSets_Handler, - }, - { - MethodName: "CountInvalidAuthorizations2", - Handler: _StorageAuthority_CountInvalidAuthorizations2_Handler, - }, - { - MethodName: "CountOrders", - Handler: _StorageAuthority_CountOrders_Handler, - }, - { - MethodName: "CountPendingAuthorizations2", - Handler: _StorageAuthority_CountPendingAuthorizations2_Handler, - }, - { - MethodName: "CountRegistrationsByIP", - Handler: _StorageAuthority_CountRegistrationsByIP_Handler, - }, - { - MethodName: "CountRegistrationsByIPRange", - Handler: _StorageAuthority_CountRegistrationsByIPRange_Handler, - }, - { - MethodName: "FQDNSetExists", - Handler: _StorageAuthority_FQDNSetExists_Handler, - }, - { - MethodName: "FQDNSetTimestampsForWindow", - Handler: _StorageAuthority_FQDNSetTimestampsForWindow_Handler, - }, - { - MethodName: "GetAuthorization2", - Handler: _StorageAuthority_GetAuthorization2_Handler, - }, - { - MethodName: "GetAuthorizations2", - Handler: _StorageAuthority_GetAuthorizations2_Handler, - }, - { - MethodName: "GetCertificate", - Handler: _StorageAuthority_GetCertificate_Handler, - }, - { - MethodName: "GetCertificateStatus", - Handler: _StorageAuthority_GetCertificateStatus_Handler, - }, - { - MethodName: "GetMaxExpiration", - Handler: _StorageAuthority_GetMaxExpiration_Handler, - }, - { - MethodName: "GetOrder", - Handler: _StorageAuthority_GetOrder_Handler, - }, - { - MethodName: "GetOrderForNames", - Handler: _StorageAuthority_GetOrderForNames_Handler, - }, - { - MethodName: "GetPendingAuthorization2", - Handler: _StorageAuthority_GetPendingAuthorization2_Handler, - }, - { - MethodName: "GetPrecertificate", - Handler: _StorageAuthority_GetPrecertificate_Handler, - }, - { - MethodName: "GetRegistration", - Handler: _StorageAuthority_GetRegistration_Handler, - }, - { - MethodName: "GetRegistrationByKey", - Handler: _StorageAuthority_GetRegistrationByKey_Handler, - }, - { - MethodName: "GetRevocationStatus", - Handler: 
_StorageAuthority_GetRevocationStatus_Handler, - }, - { - MethodName: "GetSerialMetadata", - Handler: _StorageAuthority_GetSerialMetadata_Handler, - }, - { - MethodName: "GetValidAuthorizations2", - Handler: _StorageAuthority_GetValidAuthorizations2_Handler, - }, - { - MethodName: "GetValidOrderAuthorizations2", - Handler: _StorageAuthority_GetValidOrderAuthorizations2_Handler, - }, - { - MethodName: "IncidentsForSerial", - Handler: _StorageAuthority_IncidentsForSerial_Handler, - }, - { - MethodName: "KeyBlocked", - Handler: _StorageAuthority_KeyBlocked_Handler, - }, - { - MethodName: "PreviousCertificateExists", - Handler: _StorageAuthority_PreviousCertificateExists_Handler, - }, - { - MethodName: "AddBlockedKey", - Handler: _StorageAuthority_AddBlockedKey_Handler, - }, - { - MethodName: "AddCertificate", - Handler: _StorageAuthority_AddCertificate_Handler, - }, - { - MethodName: "AddPrecertificate", - Handler: _StorageAuthority_AddPrecertificate_Handler, - }, - { - MethodName: "AddSerial", - Handler: _StorageAuthority_AddSerial_Handler, - }, - { - MethodName: "DeactivateAuthorization2", - Handler: _StorageAuthority_DeactivateAuthorization2_Handler, - }, - { - MethodName: "DeactivateRegistration", - Handler: _StorageAuthority_DeactivateRegistration_Handler, - }, - { - MethodName: "FinalizeAuthorization2", - Handler: _StorageAuthority_FinalizeAuthorization2_Handler, - }, - { - MethodName: "FinalizeOrder", - Handler: _StorageAuthority_FinalizeOrder_Handler, - }, - { - MethodName: "NewAuthorizations2", - Handler: _StorageAuthority_NewAuthorizations2_Handler, - }, - { - MethodName: "NewOrder", - Handler: _StorageAuthority_NewOrder_Handler, - }, - { - MethodName: "NewOrderAndAuthzs", - Handler: _StorageAuthority_NewOrderAndAuthzs_Handler, - }, - { - MethodName: "NewRegistration", - Handler: _StorageAuthority_NewRegistration_Handler, - }, - { - MethodName: "RevokeCertificate", - Handler: _StorageAuthority_RevokeCertificate_Handler, - }, - { - MethodName: "SetOrderError", - Handler: _StorageAuthority_SetOrderError_Handler, - }, - { - MethodName: "SetOrderProcessing", - Handler: _StorageAuthority_SetOrderProcessing_Handler, - }, - { - MethodName: "UpdateRegistration", - Handler: _StorageAuthority_UpdateRegistration_Handler, - }, - { - MethodName: "UpdateRevokedCertificate", - Handler: _StorageAuthority_UpdateRevokedCertificate_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "GetRevokedCerts", - Handler: _StorageAuthority_GetRevokedCerts_Handler, - ServerStreams: true, - }, - { - StreamName: "SerialsForIncident", - Handler: _StorageAuthority_SerialsForIncident_Handler, - ServerStreams: true, - }, - }, - Metadata: "sa.proto", -} diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go b/vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go deleted file mode 100644 index 5e2221f3f24..00000000000 --- a/vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copied from the auto-generated sa_grpc.pb.go - -package proto - -import ( - context "context" - - proto "github.com/letsencrypt/boulder/core/proto" - grpc "google.golang.org/grpc" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// StorageAuthorityGetterClient is a read-only subset of the sapb.StorageAuthorityClient interface -type StorageAuthorityGetterClient interface { - GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) - GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts 
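All of the deleted unary handlers above share one shape: decode the request, then either call the StorageAuthorityServer method directly or wrap it in a grpc.UnaryHandler closure and delegate to the installed grpc.UnaryServerInterceptor together with a grpc.UnaryServerInfo naming the full method. For context, a minimal sketch of an interceptor such handlers would route through; the logging interceptor itself is hypothetical and not part of this patch:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// loggingInterceptor is a hypothetical grpc.UnaryServerInterceptor. Generated
// handlers like the ones removed above pass it the decoded request, the
// UnaryServerInfo describing the method, and a closure over the real server
// method; the interceptor decides when (and whether) to call that closure.
func loggingInterceptor(
	ctx context.Context,
	req interface{},
	info *grpc.UnaryServerInfo,
	handler grpc.UnaryHandler,
) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("%s took %s (err=%v)", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	// Installed once on the server; every unary method registered via the
	// service's grpc.ServiceDesc then flows through it.
	s := grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
	_ = s
}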
...grpc.CallOption) (*proto.Registration, error) - GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) - GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) - GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) - CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error) - CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) - CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) - CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error) - CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error) - FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) - PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error) - GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) - GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) - CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) - GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) - GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error) - GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) - GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) - IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error) -} - -// StorageAuthorityCertificateClient is a subset of the sapb.StorageAuthorityClient interface that only reads and writes certificates -type StorageAuthorityCertificateClient interface { - AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) - AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error) - GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) -} diff --git a/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go b/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go new file mode 100644 index 00000000000..8e3bae9965a --- /dev/null +++ b/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go @@ -0,0 +1,46 @@ +// Package strictyaml provides 
a strict YAML unmarshaller based on `go-yaml/yaml` +package strictyaml + +import ( + "bytes" + "errors" + "fmt" + "io" + + "gopkg.in/yaml.v3" +) + +// Unmarshal takes a byte array and an interface passed by reference. The +// d.Decode will read the next YAML-encoded value from its input and store it in +// the value pointed to by yamlObj. Any config keys from the incoming YAML +// document which do not correspond to expected keys in the config struct will +// result in errors. +// +// TODO(https://github.com/go-yaml/yaml/issues/639): Replace this function with +// yaml.Unmarshal once a more ergonomic way to set unmarshal options is added +// upstream. +func Unmarshal(b []byte, yamlObj interface{}) error { + r := bytes.NewReader(b) + + d := yaml.NewDecoder(r) + d.KnownFields(true) + + // d.Decode will mutate yamlObj + err := d.Decode(yamlObj) + + if err != nil { + // io.EOF is returned when the YAML document is empty. + if errors.Is(err, io.EOF) { + return fmt.Errorf("unmarshalling YAML, bytes cannot be nil: %w", err) + } + return fmt.Errorf("unmarshalling YAML: %w", err) + } + + // As bytes are read by the decoder, the length of the byte buffer should + // decrease. If it doesn't, there's a problem. + if r.Len() != 0 { + return fmt.Errorf("yaml object of size %d bytes had %d bytes of unexpected unconsumed trailers", r.Size(), r.Len()) + } + + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index a912b75a05b..62de4dc59aa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -59,6 +59,18 @@ type ExemplarAdder interface { // CounterOpts is an alias for Opts. See there for doc comments. type CounterOpts Opts +// CounterVecOpts bundles the options to create a CounterVec metric. +// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type CounterVecOpts struct { + CounterOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewCounter creates a new Counter based on the provided CounterOpts. // // The returned implementation also implements ExemplarAdder. It is safe to @@ -174,16 +186,24 @@ type CounterVec struct { // NewCounterVec creates a new CounterVec based on the provided CounterOpts and // partitioned by the given label names. func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( + return V2.NewCounterVec(CounterVecOpts{ + CounterOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts. 
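The strictyaml.Unmarshal helper added above wraps yaml.NewDecoder with KnownFields(true), so unknown keys, empty input, and unconsumed trailing bytes all surface as errors instead of being silently ignored. A small usage sketch, assuming a hypothetical Config struct and inline YAML document:

package main

import (
	"fmt"
	"log"

	"github.com/letsencrypt/boulder/strictyaml"
)

// Config is a hypothetical target struct; any YAML key that does not map to
// one of these fields causes Unmarshal to return an error because the decoder
// has KnownFields(true) set.
type Config struct {
	Listen  string `yaml:"listen"`
	Timeout int    `yaml:"timeout"`
}

func main() {
	doc := []byte("listen: \":8080\"\ntimeout: 30\n")

	var c Config
	if err := strictyaml.Unmarshal(doc, &c); err != nil {
		log.Fatalf("parsing config: %s", err)
	}
	fmt.Println(c.Listen, c.Timeout)
}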
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &CounterVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) } result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} result.init(result) // Init self-collection. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 8bc5e44e2fc..12331542dde 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -14,20 +14,16 @@ package prometheus import ( - "errors" "fmt" "sort" "strings" - "github.com/cespare/xxhash/v2" - "github.com/prometheus/client_golang/prometheus/internal" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - + "github.com/cespare/xxhash/v2" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) // Desc is the descriptor used by every Prometheus Metric. It is essentially @@ -54,9 +50,9 @@ type Desc struct { // constLabelPairs contains precalculated DTO label pairs based on // the constant labels. constLabelPairs []*dto.LabelPair - // variableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string + // variableLabels contains names of labels and normalization function for + // which the metric maintains variable values. + variableLabels ConstrainedLabels // id is a hash of the values of the ConstLabels and fqName. This // must be unique among all registered descriptors and can therefore be // used as an identifier of the descriptor. @@ -80,10 +76,24 @@ type Desc struct { // For constLabels, the label values are constant. Therefore, they are fully // specified in the Desc. See the Collector example for a usage pattern. func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels) +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName must not be empty. +// +// variableLabels only contain the label names and normalization functions. Their +// label values are variable and therefore not part of the Desc. (They are managed +// within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Collector example for a usage pattern. 
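As the counter.go hunk above shows, NewCounterVec is now a thin wrapper over V2.NewCounterVec, and CounterVecOpts lets each variable label carry an optional Constraint that normalizes values before they are stored. A sketch of the constrained form, assuming a hypothetical app_requests_total metric whose method label should be lower-cased; GaugeVecOpts, HistogramVecOpts and SummaryVecOpts later in this patch follow the same pattern:

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Every value supplied for the "method" label passes through the
	// Constraint function, so "GET" and "get" land in the same series.
	requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "app_requests_total",
			Help: "Requests seen, partitioned by normalized method.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToLower},
		},
	})
	prometheus.MustRegister(requests)

	requests.WithLabelValues("GET").Inc() // stored under method="get"
}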
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc { d := &Desc{ fqName: fqName, help: help, - variableLabels: variableLabels, + variableLabels: variableLabels.constrainedLabels(), } if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) @@ -93,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // their sorted label names) plus the fqName (at position 0). labelValues := make([]string, 1, len(constLabels)+1) labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels)) labelNameSet := map[string]struct{}{} // First add only the const label names and sort them... for labelName := range constLabels { @@ -118,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. - for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + for _, label := range d.variableLabels { + if !checkLabelName(label.Name) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName) return d } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} + labelNames = append(labelNames, "$"+label.Name) + labelNameSet[label.Name] = struct{}{} } if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") + d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName) return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 811072cbd54..962608f02c6 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -37,35 +37,35 @@ // // type metrics struct { // cpuTemp prometheus.Gauge -// hdFailures *prometheus.CounterVec +// hdFailures *prometheus.CounterVec // } // // func NewMetrics(reg prometheus.Registerer) *metrics { -// m := &metrics{ -// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }), -// hdFailures: prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ), -// } -// reg.MustRegister(m.cpuTemp) -// reg.MustRegister(m.hdFailures) -// return m +// m := &metrics{ +// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }), +// hdFailures: prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ), +// } +// reg.MustRegister(m.cpuTemp) +// reg.MustRegister(m.hdFailures) +// return m // } // // func main() { -// // Create a non-global registry. -// reg := prometheus.NewRegistry() +// // Create a non-global registry. +// reg := prometheus.NewRegistry() // -// // Create new metrics and register them using the custom registry. 
-// m := NewMetrics(reg) -// // Set values for the new created metrics. +// // Create new metrics and register them using the custom registry. +// m := NewMetrics(reg) +// // Set values for the new created metrics. // m.cpuTemp.Set(65.3) // m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 21271a5bb46..f1ea6c76f75 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -55,6 +55,18 @@ type Gauge interface { // GaugeOpts is an alias for Opts. See there for doc comments. type GaugeOpts Opts +// GaugeVecOpts bundles the options to create a GaugeVec metric. +// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type GaugeVecOpts struct { + GaugeOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewGauge creates a new Gauge based on the provided GaugeOpts. // // The returned implementation is optimized for a fast Set method. If you have a @@ -138,16 +150,24 @@ type GaugeVec struct { // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and // partitioned by the given label names. func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( + return V2.NewGaugeVec(GaugeVecOpts{ + GaugeOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts. +func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &GaugeVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) } result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 3a2d55e84b1..2d8d9f64f43 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -23,11 +23,10 @@ import ( "strings" "sync" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
- "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/client_golang/prometheus/internal" + + dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) const ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 4c873a01c3d..5b69965b25b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -22,10 +22,9 @@ import ( "sync/atomic" "time" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" + + "google.golang.org/protobuf/proto" ) // nativeHistogramBounds for the frac of observed values. Only relevant for @@ -469,6 +468,18 @@ type HistogramOpts struct { NativeHistogramMaxZeroThreshold float64 } +// HistogramVecOpts bundles the options to create a HistogramVec metric. +// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type HistogramVecOpts struct { + HistogramOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewHistogram creates a new Histogram based on the provided HistogramOpts. It // panics if the buckets in HistogramOpts are not in strictly increasing order. // @@ -489,11 +500,11 @@ func NewHistogram(opts HistogramOpts) Histogram { func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) } for _, n := range desc.variableLabels { - if n == bucketLabel { + if n.Name == bucketLabel { panic(errBucketLabelNotAllowed) } } @@ -544,16 +555,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } // Finally we know the final length of h.upperBounds and can make buckets // for both counts as well as exemplars: - h.counts[0] = &histogramCounts{ - buckets: make([]uint64, len(h.upperBounds)), - nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), - nativeHistogramSchema: h.nativeHistogramSchema, - } - h.counts[1] = &histogramCounts{ - buckets: make([]uint64, len(h.upperBounds)), - nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), - nativeHistogramSchema: h.nativeHistogramSchema, - } + h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} + atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema) + h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} + atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema) h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. 
@@ -1034,15 +1041,23 @@ type HistogramVec struct { // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and // partitioned by the given label names. func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( + return V2.NewHistogramVec(HistogramVecOpts{ + HistogramOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts. +func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &HistogramVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) + return newHistogram(desc, opts.HistogramOpts, lvs...) }), } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index c1b8fad36ae..63ff8683ce5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -32,6 +32,78 @@ import ( // create a Desc. type Labels map[string]string +// ConstrainedLabels represents a label name and its constrain function +// to normalize label values. This type is commonly used when constructing +// metric vector Collectors. +type ConstrainedLabel struct { + Name string + Constraint func(string) string +} + +func (cl ConstrainedLabel) Constrain(v string) string { + if cl.Constraint == nil { + return v + } + return cl.Constraint(v) +} + +// ConstrainableLabels is an interface that allows creating of labels that can +// be optionally constrained. +// +// prometheus.V2().NewCounterVec(CounterVecOpts{ +// CounterOpts: {...}, // Usual CounterOpts fields +// VariableLabels: []ConstrainedLabels{ +// {Name: "A"}, +// {Name: "B", Constraint: func(v string) string { ... }}, +// }, +// }) +type ConstrainableLabels interface { + constrainedLabels() ConstrainedLabels + labelNames() []string +} + +// ConstrainedLabels represents a collection of label name -> constrain function +// to normalize label values. This type is commonly used when constructing +// metric vector Collectors. +type ConstrainedLabels []ConstrainedLabel + +func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels { + return cls +} + +func (cls ConstrainedLabels) labelNames() []string { + names := make([]string, len(cls)) + for i, label := range cls { + names[i] = label.Name + } + return names +} + +// UnconstrainedLabels represents collection of label without any constraint on +// their value. Thus, it is simply a collection of label names. +// +// UnconstrainedLabels([]string{ "A", "B" }) +// +// is equivalent to +// +// ConstrainedLabels { +// { Name: "A" }, +// { Name: "B" }, +// } +type UnconstrainedLabels []string + +func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels { + constrainedLabels := make([]ConstrainedLabel, len(uls)) + for i, l := range uls { + constrainedLabels[i] = ConstrainedLabel{Name: l} + } + return constrainedLabels +} + +func (uls UnconstrainedLabels) labelNames() []string { + return uls +} + // reservedLabelPrefix is a prefix which is not legal in user-supplied // label names. 
const reservedLabelPrefix = "__" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index b5119c50410..07bbc9d7687 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -20,11 +20,9 @@ import ( "strings" "time" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 21086781621..d3482c40ca7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -68,16 +68,17 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou o.apply(rtOpts) } - code, method := checkLabels(counter) + // Curry the counter with dynamic labels before checking the remaining labels. + code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels())) return func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { - addWithExemplar( - counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), - 1, - rtOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...) + for label, resolve := range rtOpts.extraLabelsFromCtx { + l[label] = resolve(resp.Request.Context()) + } + addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context())) } return resp, err } @@ -110,17 +111,18 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT o.apply(rtOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels())) return func(r *http.Request) (*http.Response, error) { start := time.Now() resp, err := next.RoundTrip(r) if err == nil { - observeWithExemplar( - obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), - time.Since(start).Seconds(), - rtOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...) 
+ for label, resolve := range rtOpts.extraLabelsFromCtx { + l[label] = resolve(resp.Request.Context()) + } + observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context())) } return resp, err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index cca67a78a90..3793036ad09 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -87,7 +87,8 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) if code { return func(w http.ResponseWriter, r *http.Request) { @@ -95,23 +96,22 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op d := newDelegator(w, nil) next.ServeHTTP(d, r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) } } return func(w http.ResponseWriter, r *http.Request) { now := time.Now() next.ServeHTTP(w, r) - - observeWithExemplar( - obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) } } @@ -138,28 +138,30 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, o.apply(hOpts) } - code, method := checkLabels(counter) + // Curry the counter with dynamic labels before checking the remaining labels. + code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels())) if code { return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - addWithExemplar( - counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - 1, - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context())) } } return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) - addWithExemplar( - counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - 1, - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) 
+ for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context())) } } @@ -191,16 +193,17 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { - observeWithExemplar( - obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, status, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) }) next.ServeHTTP(d, r) } @@ -231,28 +234,32 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) + if code { return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - float64(size), - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) } } return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - float64(size), - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) } } @@ -281,16 +288,18 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - float64(d.Written()), - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) 
+ for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context())) }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go index c590d912c94..5d4383aa14a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go @@ -24,14 +24,32 @@ type Option interface { apply(*options) } +// LabelValueFromCtx are used to compute the label value from request context. +// Context can be filled with values from request through middleware. +type LabelValueFromCtx func(ctx context.Context) string + // options store options for both a handler or round tripper. type options struct { - extraMethods []string - getExemplarFn func(requestCtx context.Context) prometheus.Labels + extraMethods []string + getExemplarFn func(requestCtx context.Context) prometheus.Labels + extraLabelsFromCtx map[string]LabelValueFromCtx } func defaultOptions() *options { - return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }} + return &options{ + getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }, + extraLabelsFromCtx: map[string]LabelValueFromCtx{}, + } +} + +func (o *options) emptyDynamicLabels() prometheus.Labels { + labels := prometheus.Labels{} + + for label := range o.extraLabelsFromCtx { + labels[label] = "" + } + + return labels } type optionApplyFunc func(*options) @@ -48,11 +66,19 @@ func WithExtraMethods(methods ...string) Option { }) } -// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics. -// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric -// will get instrumented without exemplar. +// WithExemplarFromContext allows to inject function that will get exemplar from context that will be put to counter and histogram metrics. +// If the function returns nil labels or the metric does not support exemplars, no exemplar will be added (noop), but +// metric will continue to observe/increment. func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option { return optionApplyFunc(func(o *options) { o.getExemplarFn = getExemplarFn }) } + +// WithLabelFromCtx registers a label for dynamic resolution with access to context. +// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage +func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option { + return optionApplyFunc(func(o *options) { + o.extraLabelsFromCtx[name] = valueFn + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 09e34d307c9..44da9433bee 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -21,18 +21,17 @@ import ( "path/filepath" "runtime" "sort" + "strconv" "strings" "sync" "unicode/utf8" - "github.com/cespare/xxhash/v2" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
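The promhttp changes above thread extraLabelsFromCtx through every instrumentation middleware: the metric vector is curried with empty placeholders for the dynamic labels before the usual label check, and each request then resolves the real values from its context. A sketch of wiring this up for a server handler, assuming a hypothetical "tenant" value stored in the request context by earlier middleware:

package main

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type ctxKey struct{}

func main() {
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "app_http_requests_total",
		Help: "HTTP requests, partitioned by status code and tenant.",
	}, []string{"code", "tenant"})
	prometheus.MustRegister(requests)

	handler := promhttp.InstrumentHandlerCounter(
		requests,
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}),
		// "tenant" must be one of the vec's label names; it is curried with
		// "" during the label check and resolved per request afterwards.
		promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
			if t, ok := ctx.Value(ctxKey{}).(string); ok {
				return t
			}
			return "unknown"
		}),
	)
	http.Handle("/", handler)
}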
- "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/prometheus/internal" + "github.com/cespare/xxhash/v2" dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" + "github.com/prometheus/common/expfmt" + "google.golang.org/protobuf/proto" ) const ( @@ -933,6 +932,10 @@ func checkMetricConsistency( h.WriteString(lp.GetValue()) h.Write(separatorByteSlice) } + if dtoMetric.TimestampMs != nil { + h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10)) + h.Write(separatorByteSlice) + } hSum := h.Sum64() if _, exists := metricHashes[hSum]; exists { return fmt.Errorf( @@ -962,7 +965,7 @@ func checkDescConsistency( copy(lpsFromDesc, desc.constLabelPairs) for _, l := range desc.variableLabels { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), + Name: proto.String(l.Name), }) } if len(lpsFromDesc) != len(dtoMetric.Label) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 7bc448a8939..dd359264e59 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -22,11 +22,10 @@ import ( "sync/atomic" "time" - "github.com/beorn7/perks/quantile" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" + + "github.com/beorn7/perks/quantile" + "google.golang.org/protobuf/proto" ) // quantileLabel is used for the label that defines the quantile in a @@ -148,6 +147,18 @@ type SummaryOpts struct { BufCap uint32 } +// SummaryVecOpts bundles the options to create a SummaryVec metric. +// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type SummaryVecOpts struct { + SummaryOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // Problem with the sliding-window decay algorithm... The Merge method of // perk/quantile is actually not working as advertised - and it might be // unfixable, as the underlying algorithm is apparently not capable of merging @@ -178,11 +189,11 @@ func NewSummary(opts SummaryOpts) Summary { func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) } for _, n := range desc.variableLabels { - if n == quantileLabel { + if n.Name == quantileLabel { panic(errQuantileLabelNotAllowed) } } @@ -530,20 +541,28 @@ type SummaryVec struct { // it is handled by the Prometheus server internally, “quantile†is an illegal // label name. NewSummaryVec will panic if this label name is used. func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - for _, ln := range labelNames { + return V2.NewSummaryVec(SummaryVecOpts{ + SummaryOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts. 
+func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec { + for _, ln := range opts.VariableLabels.labelNames() { if ln == quantileLabel { panic(errQuantileLabelNotAllowed) } } - desc := NewDesc( + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &SummaryVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) + return newSummary(desc, opts.SummaryOpts, lvs...) }), } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go index f28a76f3a62..52344fef53f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -23,7 +23,9 @@ type Timer struct { } // NewTimer creates a new Timer. The provided Observer is used to observe a -// duration in seconds. Timer is usually used to time a function call in the +// duration in seconds. If the Observer implements ExemplarObserver, passing exemplar +// later on will be also supported. +// Timer is usually used to time a function call in the // following way: // // func TimeMe() { @@ -31,6 +33,14 @@ type Timer struct { // defer timer.ObserveDuration() // // Do actual work. // } +// +// or +// +// func TimeMeWithExemplar() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDurationWithExemplar(exemplar) +// // Do actual work. +// } func NewTimer(o Observer) *Timer { return &Timer{ begin: time.Now(), @@ -53,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration { } return d } + +// ObserveDurationWithExemplar is like ObserveDuration, but it will also +// observe exemplar with the duration unless exemplar is nil or provided Observer can't +// be casted to ExemplarObserver. +func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration { + d := time.Since(t.begin) + eo, ok := t.observer.(ExemplarObserver) + if ok && exemplar != nil { + eo.ObserveWithExemplar(d.Seconds(), exemplar) + return d + } + if t.observer != nil { + t.observer.Observe(d.Seconds()) + } + return d +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 2d3abc1cbd6..5f6bb80014d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -19,13 +19,11 @@ import ( "time" "unicode/utf8" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // ValueType is an enumeration of metric types that represent a simple value. 
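The timer.go addition above lets a Timer forward an exemplar along with the observed duration when the underlying Observer also implements ExemplarObserver (as histograms do); otherwise it falls back to a plain Observe. A short sketch, assuming a hypothetical task-duration histogram and an illustrative trace ID:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	latency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "task_duration_seconds",
		Help:    "Task duration.",
		Buckets: prometheus.DefBuckets,
	})
	prometheus.MustRegister(latency)

	timer := prometheus.NewTimer(latency)
	defer timer.ObserveDurationWithExemplar(prometheus.Labels{
		// The trace_id value here is illustrative; exemplar labels are
		// typically taken from the current trace context.
		"trace_id": "4bf92f3577b34da6",
	})

	// ... do the actual work being timed here ...
}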
@@ -188,9 +186,9 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { return desc.constLabelPairs } labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { + for i, l := range desc.variableLabels { labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), + Name: proto.String(l.Name), Value: proto.String(labelValues[i]), }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 7ae322590c8..386fb2d23e2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -72,6 +72,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { // with a performance overhead (for creating and processing the Labels map). // See also the CounterVec example. func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + lvs = constrainLabelValues(m.desc, lvs, m.curry) h, err := m.hashLabelValues(lvs) if err != nil { return false @@ -91,6 +92,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { // This method is used for the same purpose as DeleteLabelValues(...string). See // there for pros and cons of the two methods. func (m *MetricVec) Delete(labels Labels) bool { + labels = constrainLabels(m.desc, labels) h, err := m.hashLabels(labels) if err != nil { return false @@ -106,6 +108,7 @@ func (m *MetricVec) Delete(labels Labels) bool { // Note that curried labels will never be matched if deleting from the curried vector. // To match curried labels with DeletePartialMatch, it must be called on the base vector. func (m *MetricVec) DeletePartialMatch(labels Labels) int { + labels = constrainLabels(m.desc, labels) return m.metricMap.deleteByLabels(labels, m.curry) } @@ -145,10 +148,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { iCurry int ) for i, label := range m.desc.variableLabels { - val, ok := labels[label] + val, ok := labels[label.Name] if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if ok { - return nil, fmt.Errorf("label name %q is already curried", label) + return nil, fmt.Errorf("label name %q is already curried", label.Name) } newCurry = append(newCurry, oldCurry[iCurry]) iCurry++ @@ -156,7 +159,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { if !ok { continue // Label stays uncurried. } - newCurry = append(newCurry, curriedLabelValue{i, val}) + newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)}) } } if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { @@ -199,6 +202,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { // a wrapper around MetricVec, implementing a vector for a specific Metric // implementation, for example GaugeVec. func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + lvs = constrainLabelValues(m.desc, lvs, m.curry) h, err := m.hashLabelValues(lvs) if err != nil { return nil, err @@ -224,6 +228,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { // around MetricVec, implementing a vector for a specific Metric implementation, // for example GaugeVec. 
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + labels = constrainLabels(m.desc, labels) h, err := m.hashLabels(labels) if err != nil { return nil, err @@ -266,16 +271,16 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { iCurry int ) for i, label := range m.desc.variableLabels { - val, ok := labels[label] + val, ok := labels[label.Name] if iCurry < len(curry) && curry[iCurry].index == i { if ok { - return 0, fmt.Errorf("label name %q is already curried", label) + return 0, fmt.Errorf("label name %q is already curried", label.Name) } h = m.hashAdd(h, curry[iCurry].value) iCurry++ } else { if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) + return 0, fmt.Errorf("label name %q missing in label map", label.Name) } h = m.hashAdd(h, val) } @@ -453,7 +458,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values [] func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { for l, v := range labels { // Check if the target label exists in our metrics and get the index. - varLabelIndex, validLabel := indexOf(l, desc.variableLabels) + varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames()) if validLabel { // Check the value of that label against the target value. // We don't consider curried values in partial matches. @@ -605,7 +610,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe iCurry++ continue } - if values[i] != labels[k] { + if values[i] != labels[k.Name] { return false } } @@ -621,7 +626,7 @@ func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) [] iCurry++ continue } - labelValues[i] = labels[k] + labelValues[i] = labels[k.Name] } return labelValues } @@ -640,3 +645,34 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { } return labelValues } + +func constrainLabels(desc *Desc, labels Labels) Labels { + constrainedValues := make(Labels, len(labels)) + for l, v := range labels { + if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok { + constrainedValues[l] = desc.variableLabels[i].Constrain(v) + continue + } + constrainedValues[l] = v + } + return constrainedValues +} + +func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { + constrainedValues := make([]string, len(lvs)) + var iCurry, iLVs int + for i := 0; i < len(lvs)+len(curry); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + iCurry++ + continue + } + + if i < len(desc.variableLabels) { + constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs]) + } else { + constrainedValues[iLVs] = lvs[iLVs] + } + iLVs++ + } + return constrainedValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go new file mode 100644 index 00000000000..42bc3a8f066 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go @@ -0,0 +1,23 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +type v2 struct{} + +// V2 is a struct that can be referenced to access experimental API that might +// be present in v2 of client golang someday. It offers extended functionality +// of v1 with slightly changed API. It is acceptable to use some pieces from v1 +// and e.g `prometheus.NewGauge` and some from v2 e.g. `prometheus.V2.NewDesc` +// in the same codebase. +var V2 = v2{} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 1498ee144cb..25da157f152 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -17,12 +17,10 @@ import ( "fmt" "sort" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" + "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" + "google.golang.org/protobuf/proto" ) // WrapRegistererWith returns a Registerer wrapping the provided @@ -206,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { constLabels[ln] = lv } // NewDesc will do remaining validations. - newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) // Propagate errors if there was any. This will override any errer // created by NewDesc above, i.e. earlier errors get precedence. if desc.err != nil { diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 35904ea1986..2b5bca4b999 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,25 +1,38 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.20.3 // source: io/prometheus/client/metrics.proto package io_prometheus_client import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type MetricType int32 @@ -38,23 +51,25 @@ const ( MetricType_GAUGE_HISTOGRAM MetricType = 5 ) -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", - 5: "GAUGE_HISTOGRAM", -} - -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, - "GAUGE_HISTOGRAM": 5, -} +// Enum value maps for MetricType. +var ( + MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", + } + MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, + } +) func (x MetricType) Enum() *MetricType { p := new(MetricType) @@ -63,449 +78,519 @@ func (x MetricType) Enum() *MetricType { } func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") +func (MetricType) Descriptor() protoreflect.EnumDescriptor { + return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor() +} + +func (MetricType) Type() protoreflect.EnumType { + return &file_io_prometheus_client_metrics_proto_enumTypes[0] +} + +func (x MetricType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MetricType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = MetricType(value) + *x = MetricType(num) return nil } +// Deprecated: Use MetricType.Descriptor instead. 
func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} } type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` } -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (m *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(m, src) +func (x *LabelPair) Reset() { + *x = LabelPair{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *LabelPair) XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) + +func (x *LabelPair) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) + +func (*LabelPair) ProtoMessage() {} + +func (x *LabelPair) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_LabelPair proto.InternalMessageInfo +// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead. 
+func (*LabelPair) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} +} -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *LabelPair) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value +func (x *LabelPair) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value } return "" } type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{1} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (m *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(m, src) +func (x *Gauge) Reset() { + *x = Gauge{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) + +func (x *Gauge) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) + +func (*Gauge) ProtoMessage() {} + +func (x *Gauge) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Gauge proto.InternalMessageInfo +// Deprecated: Use Gauge.ProtoReflect.Descriptor instead. 
+func (*Gauge) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1} +} -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Gauge) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{2} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` } -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (m *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(m, src) +func (x *Counter) Reset() { + *x = Counter{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) + +func (x *Counter) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) + +func (*Counter) ProtoMessage() {} + +func (x *Counter) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Counter proto.InternalMessageInfo +// Deprecated: Use Counter.ProtoReflect.Descriptor instead. 
+func (*Counter) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2} +} -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Counter) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Counter) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Counter) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} -func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{3} + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` } -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) -} -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (m *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(m, src) +func (x *Quantile) Reset() { + *x = Quantile{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) + +func (x *Quantile) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) + +func (*Quantile) ProtoMessage() {} + +func (x *Quantile) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Quantile proto.InternalMessageInfo +// Deprecated: Use Quantile.ProtoReflect.Descriptor instead. 
+func (*Quantile) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3} +} -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile +func (x *Quantile) GetQuantile() float64 { + if x != nil && x.Quantile != nil { + return *x.Quantile } return 0 } -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Quantile) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} -func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{4} + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` } -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (m *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(m, src) +func (x *Summary) Reset() { + *x = Summary{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) + +func (x *Summary) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) + +func (*Summary) ProtoMessage() {} + +func (x *Summary) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Summary proto.InternalMessageInfo +// Deprecated: Use Summary.ProtoReflect.Descriptor instead. 
+func (*Summary) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4} +} -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Summary) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Summary) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile +func (x *Summary) GetQuantile() []*Quantile { + if x != nil { + return x.Quantile } return nil } type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} -func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{5} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) -} -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (m *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(m, src) +func (x *Untyped) Reset() { + *x = Untyped{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) + +func (x *Untyped) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) + +func (*Untyped) ProtoMessage() {} + +func (x *Untyped) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Untyped proto.InternalMessageInfo +// Deprecated: Use Untyped.ProtoReflect.Descriptor instead. 
+func (*Untyped) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5} +} -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Untyped) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Histogram struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` + SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` // Buckets for the conventional histogram. - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). // In the future, more bucket schemas may be added using numbers < -4 or > 8. Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` - ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` - ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` - ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` + ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket. + ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket. + ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0. // Negative buckets for the native histogram. NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` // Use either "negative_delta" or "negative_count", the former for // regular histograms with integer counts, the latter for float // histograms. - NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` - NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. // Positive buckets for the native histogram. 
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float // histograms. - PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` - PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. } -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{6} +func (x *Histogram) Reset() { + *x = Histogram{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) +func (x *Histogram) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) -} -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) -} -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) + +func (*Histogram) ProtoMessage() {} + +func (x *Histogram) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Histogram proto.InternalMessageInfo +// Deprecated: Use Histogram.ProtoReflect.Descriptor instead. 
+func (*Histogram) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6} +} -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Histogram) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Histogram) GetSampleCountFloat() float64 { - if m != nil && m.SampleCountFloat != nil { - return *m.SampleCountFloat +func (x *Histogram) GetSampleCountFloat() float64 { + if x != nil && x.SampleCountFloat != nil { + return *x.SampleCountFloat } return 0 } -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Histogram) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket +func (x *Histogram) GetBucket() []*Bucket { + if x != nil { + return x.Bucket } return nil } -func (m *Histogram) GetSchema() int32 { - if m != nil && m.Schema != nil { - return *m.Schema +func (x *Histogram) GetSchema() int32 { + if x != nil && x.Schema != nil { + return *x.Schema } return 0 } -func (m *Histogram) GetZeroThreshold() float64 { - if m != nil && m.ZeroThreshold != nil { - return *m.ZeroThreshold +func (x *Histogram) GetZeroThreshold() float64 { + if x != nil && x.ZeroThreshold != nil { + return *x.ZeroThreshold } return 0 } -func (m *Histogram) GetZeroCount() uint64 { - if m != nil && m.ZeroCount != nil { - return *m.ZeroCount +func (x *Histogram) GetZeroCount() uint64 { + if x != nil && x.ZeroCount != nil { + return *x.ZeroCount } return 0 } -func (m *Histogram) GetZeroCountFloat() float64 { - if m != nil && m.ZeroCountFloat != nil { - return *m.ZeroCountFloat +func (x *Histogram) GetZeroCountFloat() float64 { + if x != nil && x.ZeroCountFloat != nil { + return *x.ZeroCountFloat } return 0 } -func (m *Histogram) GetNegativeSpan() []*BucketSpan { - if m != nil { - return m.NegativeSpan +func (x *Histogram) GetNegativeSpan() []*BucketSpan { + if x != nil { + return x.NegativeSpan } return nil } -func (m *Histogram) GetNegativeDelta() []int64 { - if m != nil { - return m.NegativeDelta +func (x *Histogram) GetNegativeDelta() []int64 { + if x != nil { + return x.NegativeDelta } return nil } -func (m *Histogram) GetNegativeCount() []float64 { - if m != nil { - return m.NegativeCount +func (x *Histogram) GetNegativeCount() []float64 { + if x != nil { + return x.NegativeCount } return nil } -func (m *Histogram) GetPositiveSpan() []*BucketSpan { - if m != nil { - return m.PositiveSpan +func (x *Histogram) GetPositiveSpan() []*BucketSpan { + if x != nil { + return x.PositiveSpan } return nil } -func (m *Histogram) GetPositiveDelta() []int64 { - if m != nil { - return m.PositiveDelta +func (x *Histogram) GetPositiveDelta() []int64 { + if x != nil { + return x.PositiveDelta } return nil } -func (m *Histogram) GetPositiveCount() []float64 { - if m != nil { - return m.PositiveCount +func (x *Histogram) GetPositiveCount() []float64 { + if x != nil { + return x.PositiveCount } return nil } @@ -513,64 +598,72 @@ func (m *Histogram) GetPositiveCount() []float64 { // A Bucket of a conventional histogram, each of which is treated as // an individual counter-like time series by Prometheus. 
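// Editor's note: illustrative worked example, not part of the vendored patch.
// It spells out the Schema field comment on Histogram above: with schema n,
// each power of two is split into 2^n logarithmic buckets, so neighbouring
// bucket boundaries differ by a factor of 2^(2^-n).
//
//	// growthFactor returns the ratio between neighbouring native-histogram
//	// bucket boundaries for a given schema (valid range: -4 <= schema <= 8).
//	func growthFactor(schema int) float64 {
//		return math.Pow(2, math.Pow(2, float64(-schema)))
//	}
//
//	// growthFactor(0)  == 2       (one bucket per power of two)
//	// growthFactor(3)  ≈  1.0905  (2^3 = 8 buckets per power of two)
//	// growthFactor(-2) == 16      (each bucket spans four powers of two)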
type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order. + CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0. + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive. Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} -func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{7} +func (x *Bucket) Reset() { + *x = Bucket{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) -} -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) -} -func (m *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(m, src) -} -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) +func (x *Bucket) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) + +func (*Bucket) ProtoMessage() {} + +func (x *Bucket) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Bucket proto.InternalMessageInfo +// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. 
+func (*Bucket) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7} +} -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount +func (x *Bucket) GetCumulativeCount() uint64 { + if x != nil && x.CumulativeCount != nil { + return *x.CumulativeCount } return 0 } -func (m *Bucket) GetCumulativeCountFloat() float64 { - if m != nil && m.CumulativeCountFloat != nil { - return *m.CumulativeCountFloat +func (x *Bucket) GetCumulativeCountFloat() float64 { + if x != nil && x.CumulativeCountFloat != nil { + return *x.CumulativeCountFloat } return 0 } -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound +func (x *Bucket) GetUpperBound() float64 { + if x != nil && x.UpperBound != nil { + return *x.UpperBound } return 0 } -func (m *Bucket) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Bucket) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } @@ -582,333 +675,658 @@ func (m *Bucket) GetExemplar() *Exemplar { // structured here (with all the buckets in a single array separate // from the Spans). type BucketSpan struct { - Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` - Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *BucketSpan) Reset() { *m = BucketSpan{} } -func (m *BucketSpan) String() string { return proto.CompactTextString(m) } -func (*BucketSpan) ProtoMessage() {} -func (*BucketSpan) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{8} + Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative). + Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets. } -func (m *BucketSpan) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BucketSpan.Unmarshal(m, b) -} -func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) -} -func (m *BucketSpan) XXX_Merge(src proto.Message) { - xxx_messageInfo_BucketSpan.Merge(m, src) +func (x *BucketSpan) Reset() { + *x = BucketSpan{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BucketSpan) XXX_Size() int { - return xxx_messageInfo_BucketSpan.Size(m) + +func (x *BucketSpan) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BucketSpan) XXX_DiscardUnknown() { - xxx_messageInfo_BucketSpan.DiscardUnknown(m) + +func (*BucketSpan) ProtoMessage() {} + +func (x *BucketSpan) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_BucketSpan proto.InternalMessageInfo +// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead. 
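// Editor's note: illustrative sketch, not part of the vendored patch. Based on
// the BucketSpan and delta field comments above, this is how the span/delta
// encoding expands into absolute bucket indices and counts: a span's Offset is
// the gap to the previous span (or the starting index for the first span),
// Length is the number of consecutive buckets, and each delta is the count
// difference to the previous bucket (or to zero for the first bucket).
//
//	// expandBuckets returns the absolute (index, count) pairs encoded by
//	// spans and deltas, e.g. Histogram.PositiveSpan with PositiveDelta.
//	func expandBuckets(spans []*BucketSpan, deltas []int64) (idxs []int32, counts []int64) {
//		var idx int32
//		var count int64
//		i := 0
//		for _, s := range spans {
//			idx += s.GetOffset() // skip the gap (or seed the first index)
//			for j := uint32(0); j < s.GetLength(); j++ {
//				count += deltas[i] // deltas accumulate across spans
//				idxs = append(idxs, idx)
//				counts = append(counts, count)
//				idx++
//				i++
//			}
//		}
//		return idxs, counts
//	}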
+func (*BucketSpan) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8} +} -func (m *BucketSpan) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset +func (x *BucketSpan) GetOffset() int32 { + if x != nil && x.Offset != nil { + return *x.Offset } return 0 } -func (m *BucketSpan) GetLength() uint32 { - if m != nil && m.Length != nil { - return *m.Length +func (x *BucketSpan) GetLength() uint32 { + if x != nil && x.Length != nil { + return *x.Length } return 0 } type Exemplar struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} -func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{9} + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style. } -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Exemplar.Unmarshal(m, b) -} -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) -} -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) +func (x *Exemplar) Reset() { + *x = Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Exemplar) XXX_Size() int { - return xxx_messageInfo_Exemplar.Size(m) + +func (x *Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) + +func (*Exemplar) ProtoMessage() {} + +func (x *Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Exemplar proto.InternalMessageInfo +// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead. 
+func (*Exemplar) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9} +} -func (m *Exemplar) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Exemplar) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Exemplar) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Exemplar) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp +func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{10} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` +} + +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) + +func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) } -var xxx_messageInfo_Metric proto.InternalMessageInfo +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. +func (*Metric) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10} +} -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Metric) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge +func (x *Metric) GetGauge() *Gauge { + if x != nil { + return x.Gauge } return nil } -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter +func (x *Metric) GetCounter() *Counter { + if x != nil { + return x.Counter } return nil } -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary +func (x *Metric) GetSummary() *Summary { + if x != nil { + return x.Summary } return nil } -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped +func (x *Metric) GetUntyped() *Untyped { + if x != nil { + return x.Untyped } return nil } -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram +func (x *Metric) GetHistogram() *Histogram { + if x != nil { + return x.Histogram } return nil } -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs +func (x *Metric) GetTimestampMs() int64 { + if x != nil && x.TimestampMs != nil { + return *x.TimestampMs } return 0 } type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{11} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` +} + +func (x *MetricFamily) Reset() { + *x = MetricFamily{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (m *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(m, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) +func (x *MetricFamily) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MetricFamily) XXX_DiscardUnknown() { - 
xxx_messageInfo_MetricFamily.DiscardUnknown(m) + +func (*MetricFamily) ProtoMessage() {} + +func (x *MetricFamily) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo +// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead. +func (*MetricFamily) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11} +} -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *MetricFamily) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help +func (x *MetricFamily) GetHelp() string { + if x != nil && x.Help != nil { + return *x.Help } return "" } -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type +func (x *MetricFamily) GetType() MetricType { + if x != nil && x.Type != nil { + return *x.Type } return MetricType_COUNTER } -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric +func (x *MetricFamily) GetMetric() []*Metric { + if x != nil { + return x.Metric } return nil } -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") - proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") -} - -func init() { - proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) -} - -var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 896 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44, - 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48, - 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92, - 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0, - 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b, - 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3, - 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90, - 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25, - 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb, - 0xe0, 
0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19, - 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba, - 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b, - 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c, - 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0, - 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5, - 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd, - 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d, - 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b, - 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b, - 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f, - 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b, - 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8, - 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e, - 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79, - 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29, - 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48, - 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca, - 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9, - 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5, - 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe, - 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55, - 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e, - 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83, - 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65, - 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a, - 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8, - 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf, - 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1, - 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97, - 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc, - 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7, - 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b, - 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58, - 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b, - 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8, - 0x0a, 0x95, 0x48, 0x2d, 0x3c, 
0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f, - 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67, - 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28, - 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27, - 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41, - 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f, - 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f, - 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac, - 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a, - 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab, - 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00, +var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor + +var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, + 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a, + 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x61, 0x6d, 
0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, + 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, + 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, + 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, + 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, + 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 
0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, + 0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, + 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, + 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, + 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, + 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, + 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, + 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, + 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, + 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 
0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, + 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, + 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, + 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, + 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, + 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, + 0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, + 0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, + 0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 
0x52, 0x59, + 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, + 0x0a, 0x0f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, + 0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, + 0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, +} + +var ( + file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once + file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc +) + +func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte { + file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() { + file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData) + }) + return file_io_prometheus_client_metrics_proto_rawDescData +} + +var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{ + (MetricType)(0), // 0: io.prometheus.client.MetricType + (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair + (*Gauge)(nil), // 2: io.prometheus.client.Gauge + (*Counter)(nil), // 3: io.prometheus.client.Counter + (*Quantile)(nil), // 4: io.prometheus.client.Quantile + (*Summary)(nil), // 5: io.prometheus.client.Summary + (*Untyped)(nil), // 6: io.prometheus.client.Untyped + (*Histogram)(nil), // 7: io.prometheus.client.Histogram + (*Bucket)(nil), // 8: io.prometheus.client.Bucket + (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan + (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar + (*Metric)(nil), // 11: io.prometheus.client.Metric + (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp +} +var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ + 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar + 4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile + 8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket + 9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan + 9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan + 10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 11: io.prometheus.client.Metric.summary:type_name -> 
io.prometheus.client.Summary + 6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_io_prometheus_client_metrics_proto_init() } +func file_io_prometheus_client_metrics_proto_init() { + if File_io_prometheus_client_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LabelPair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Gauge); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Counter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Quantile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Summary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Untyped); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Histogram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BucketSpan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricFamily); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc, + NumEnums: 1, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_io_prometheus_client_metrics_proto_goTypes, + DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs, + EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes, + MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes, + }.Build() + File_io_prometheus_client_metrics_proto = out.File + file_io_prometheus_client_metrics_proto_rawDesc = nil + file_io_prometheus_client_metrics_proto_goTypes = nil + file_io_prometheus_client_metrics_proto_depIdxs = nil } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 7657f841d63..f4fc8845522 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -115,32 +115,28 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader - p TextParser - fams []*dto.MetricFamily + fams map[string]*dto.MetricFamily + err error } // Decode implements the Decoder interface. func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. - fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) + if d.err == nil { + // Read all metrics in one shot. + var p TextParser + d.fams, d.err = p.TextToMetricFamilies(d.r) + // If we don't get an error, store io.EOF for the end. + if d.err == nil { + d.err = io.EOF } } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil + // Pick off one MetricFamily per Decode until there's nothing left. + for key, fam := range d.fams { + *v = *fam + delete(d.fams, key) + return nil + } + return d.err } // SampleDecoder wraps a Decoder to extract samples from the metric families diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index f819e4f8b54..dfac962a4e7 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -21,8 +21,8 @@ import "bytes" // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz // // Further input samples should go in the folder fuzz/corpus. 
func Fuzz(in []byte) int { diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 9d94ae9effe..21cdddcf054 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -46,20 +46,20 @@ import ( // missing features and peculiarities to avoid complications when switching from // Prometheus to OpenMetrics or vice versa: // -// - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. // -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. // -// - The size of exemplar labels is not checked (i.e. it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). // -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { name := in.GetName() if name == "" { diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 5ba503b0654..2946b8f1a64 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -17,7 +17,6 @@ import ( "bufio" "fmt" "io" - "io/ioutil" "math" "strconv" "strings" @@ -44,7 +43,7 @@ const ( var ( bufPool = sync.Pool{ New: func() interface{} { - return bufio.NewWriter(ioutil.Discard) + return bufio.NewWriter(io.Discard) }, } numBufPool = sync.Pool{ diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 84be0643ec6..ac2482782c7 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -142,9 +142,13 @@ func (p *TextParser) reset(in io.Reader) { func (p *TextParser) startOfLine() stateFn { p.lineCount++ if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil + // This is the only place that we expect to see io.EOF, + // which is not an error but the signal that we are done. + // Any other error that happens to align with the start of + // a line is still an error. 
+ if p.err == io.EOF { + p.err = nil + } return nil } switch p.currentByte { diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 26e92288c7c..a21b9d15dd8 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -11,18 +11,18 @@ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - */ package goautoneg diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index c909b8aa8c5..5727452c1ee 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -18,7 +18,6 @@ import ( "errors" "fmt" "math" - "regexp" "strconv" "strings" "time" @@ -183,54 +182,78 @@ func (d *Duration) Type() string { return "duration" } -var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") +func isdigit(c byte) bool { return c >= '0' && c <= '9' } + +// Units are required to go in order from biggest to smallest. +// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day. +var unitMap = map[string]struct { + pos int + mult uint64 +}{ + "ms": {7, uint64(time.Millisecond)}, + "s": {6, uint64(time.Second)}, + "m": {5, uint64(time.Minute)}, + "h": {4, uint64(time.Hour)}, + "d": {3, uint64(24 * time.Hour)}, + "w": {2, uint64(7 * 24 * time.Hour)}, + "y": {1, uint64(365 * 24 * time.Hour)}, +} // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. 
-func ParseDuration(durationStr string) (Duration, error) { - switch durationStr { +func ParseDuration(s string) (Duration, error) { + switch s { case "0": // Allow 0 without a unit. return 0, nil case "": return 0, errors.New("empty duration string") } - matches := durationRE.FindStringSubmatch(durationStr) - if matches == nil { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - var dur time.Duration - // Parse the match at pos `pos` in the regex and use `mult` to turn that - // into ms, then add that value to the total parsed duration. - var overflowErr error - m := func(pos int, mult time.Duration) { - if matches[pos] == "" { - return + orig := s + var dur uint64 + lastUnitPos := 0 + + for s != "" { + if !isdigit(s[0]) { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + // Consume [0-9]* + i := 0 + for ; i < len(s) && isdigit(s[i]); i++ { + } + v, err := strconv.ParseUint(s[:i], 10, 0) + if err != nil { + return 0, fmt.Errorf("not a valid duration string: %q", orig) } - n, _ := strconv.Atoi(matches[pos]) + s = s[i:] + // Consume unit. + for i = 0; i < len(s) && !isdigit(s[i]); i++ { + } + if i == 0 { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig) + } + if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest. + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + lastUnitPos = unit.pos // Check if the provided duration overflows time.Duration (> ~ 290years). - if n > int((1<<63-1)/mult/time.Millisecond) { - overflowErr = errors.New("duration out of range") + if v > 1<<63/unit.mult { + return 0, errors.New("duration out of range") } - d := time.Duration(n) * time.Millisecond - dur += d * mult - - if dur < 0 { - overflowErr = errors.New("duration out of range") + dur += v * unit.mult + if dur > 1<<63-1 { + return 0, errors.New("duration out of range") } } - - m(2, 1000*60*60*24*365) // y - m(4, 1000*60*60*24*7) // w - m(6, 1000*60*60*24) // d - m(8, 1000*60*60) // h - m(10, 1000*60) // m - m(12, 1000) // s - m(14, 1) // ms - - return Duration(dur), overflowErr + return Duration(dur), nil } func (d Duration) String() string { diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index c9d8fb1a283..9eb440413fd 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -16,20 +16,12 @@ package model import ( "encoding/json" "fmt" - "math" "sort" "strconv" "strings" ) var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - // ZeroSample is the pseudo zero-value of Sample used to signal a // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, // and metric nil. Note that the natural zero value of Sample has a timestamp @@ -38,82 +30,14 @@ var ( ZeroSample = Sample{Timestamp: Earliest} ) -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. 
-func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. +// Sample is a sample pair associated with a metric. A single sample must either +// define Value or Histogram but not both. Histogram == nil implies the Value +// field is used, otherwise it should be ignored. type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` + Histogram *SampleHistogram `json:"histogram"` } // Equal compares first the metrics, then the timestamp, then the value. The @@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool { if !s.Timestamp.Equal(o.Timestamp) { return false } - + if s.Histogram != nil { + return s.Histogram.Equal(o.Histogram) + } return s.Value.Equal(o.Value) } func (s Sample) String() string { + if s.Histogram != nil { + return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }) + } return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ Timestamp: s.Timestamp, Value: s.Value, @@ -142,6 +74,19 @@ func (s Sample) String() string { // MarshalJSON implements json.Marshaler. 
func (s Sample) MarshalJSON() ([]byte, error) { + if s.Histogram != nil { + v := struct { + Metric Metric `json:"metric"` + Histogram SampleHistogramPair `json:"histogram"` + }{ + Metric: s.Metric, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, + } + return json.Marshal(&v) + } v := struct { Metric Metric `json:"metric"` Value SamplePair `json:"value"` @@ -152,21 +97,25 @@ func (s Sample) MarshalJSON() ([]byte, error) { Value: s.Value, }, } - return json.Marshal(&v) } // UnmarshalJSON implements json.Unmarshaler. func (s *Sample) UnmarshalJSON(b []byte) error { v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + Histogram SampleHistogramPair `json:"histogram"` }{ Metric: s.Metric, Value: SamplePair{ Timestamp: s.Timestamp, Value: s.Value, }, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, } if err := json.Unmarshal(b, &v); err != nil { @@ -174,8 +123,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error { } s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value + if v.Histogram.Histogram != nil { + s.Timestamp = v.Histogram.Timestamp + s.Histogram = v.Histogram.Histogram + } else { + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + } return nil } @@ -221,80 +175,76 @@ func (s Samples) Equal(o Samples) bool { // SampleStream is a stream of Values belonging to an attached COWMetric. type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` } func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) + valuesLength := len(ss.Values) + vals := make([]string, valuesLength+len(ss.Histograms)) for i, v := range ss.Values { vals[i] = v.String() } + for i, v := range ss.Histograms { + vals[i+valuesLength] = v.String() + } return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) } -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string +func (ss SampleStream) MarshalJSON() ([]byte, error) { + if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else if len(ss.Histograms) > 0 { + v := struct { + Metric Metric `json:"metric"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + }{ + Metric: ss.Metric, + Values: ss.Values, + } + return json.Marshal(&v) + } } -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. 
-func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} +func (ss *SampleStream) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { + if err := json.Unmarshal(b, &v); err != nil { return err } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") + ss.Metric = v.Metric + ss.Values = v.Values + ss.Histograms = v.Histograms + + return nil } // Scalar is a scalar value evaluated at the set timestamp. diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go new file mode 100644 index 00000000000..0f615a70530 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -0,0 +1,100 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "strconv" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. 
+func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go new file mode 100644 index 00000000000..54bb038cfff --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -0,0 +1,178 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type FloatString float64 + +func (v FloatString) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +func (v FloatString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func (v *FloatString) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("float value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = FloatString(f) + return nil +} + +type HistogramBucket struct { + Boundaries int32 + Lower FloatString + Upper FloatString + Count FloatString +} + +func (s HistogramBucket) MarshalJSON() ([]byte, error) { + b, err := json.Marshal(s.Boundaries) + if err != nil { + return nil, err + } + l, err := json.Marshal(s.Lower) + if err != nil { + return nil, err + } + u, err := json.Marshal(s.Upper) + if err != nil { + return nil, err + } + c, err := json.Marshal(s.Count) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil +} + +func (s *HistogramBucket) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + +func (s *HistogramBucket) Equal(o *HistogramBucket) bool { + return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) +} + +func (b HistogramBucket) String() string { + var sb strings.Builder + lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 + upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + if lowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if upperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +type HistogramBuckets []*HistogramBucket + +func (s HistogramBuckets) Equal(o HistogramBuckets) bool { + if len(s) != len(o) { + return false + } + + for i, bucket := range s { + if !bucket.Equal(o[i]) { + return false + } + } + return true +} + +type SampleHistogram struct { + Count FloatString `json:"count"` + Sum FloatString `json:"sum"` + Buckets HistogramBuckets `json:"buckets"` +} + +func (s SampleHistogram) String() string { + return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets) +} + +func (s *SampleHistogram) Equal(o *SampleHistogram) bool { + return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets)) +} + +type SampleHistogramPair struct { + Timestamp Time + // Histogram should never be nil, it's only stored as pointer for efficiency. 
+ Histogram *SampleHistogram +} + +func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { + if s.Histogram == nil { + return nil, fmt.Errorf("histogram is nil") + } + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Histogram) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Timestamp, &s.Histogram} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + if s.Histogram == nil { + return fmt.Errorf("histogram is null") + } + return nil +} + +func (s SampleHistogramPair) String() string { + return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp) +} + +func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool { + return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp)) +} diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go new file mode 100644 index 00000000000..726c50ee638 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -0,0 +1,83 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" +) + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. 
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 6c8e3e21979..e358db69c5d 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -55,19 +55,22 @@ ifneq ($(shell which gotestsum),) endif endif -PROMU_VERSION ?= 0.13.0 +PROMU_VERSION ?= 0.14.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz +SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.45.2 +GOLANGCI_LINT_VERSION ?= v1.49.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) # If we're in CI and there is an Actions file, that means the linter # is being run in Actions, so we don't need to run it here. 
- ifeq (,$(CIRCLE_JOB)) + ifneq (,$(SKIP_GOLANGCI_LINT)) + GOLANGCI_LINT := + else ifeq (,$(CIRCLE_JOB)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index ff6b927da15..06968ca2ed4 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -380,6 +380,42 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { return cpuinfo, nil } +func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "CPU Family": + cpuinfo[i].CPUFamily = field[1] + case "Model Name": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go new file mode 100644 index 00000000000..d88442f0edf --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoLoong diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go index ea41bf2ca1e..a6b2b3127cb 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x -// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x +//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x +// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go index d31a82600f6..f9d961e4417 100644 --- a/vendor/github.com/prometheus/procfs/doc.go +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -16,30 +16,29 @@ // // Example: // -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.Stat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.Stat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } package procfs diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index f7a828bb1da..0c482c18ccf 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -284,7 +284,8 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { } // parseMount parses an entry in /proc/[pid]/mountstats in the format: -// device [device] mounted on [mount] with fstype [type] +// +// device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { return nil, fmt.Errorf("invalid device entry: %v", ss) diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index a94f86dc4ae..06b7b8f2163 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -27,8 +27,9 @@ import ( // For the proc file format details, // See: // * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 -// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 -// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. +// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 +// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 +// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 // SoftnetStat contains a single row of data from /proc/net/softnet_stat. 
type SoftnetStat struct { @@ -38,6 +39,18 @@ type SoftnetStat struct { Dropped uint32 // Number of times processing packets ran out of quota. TimeSqueezed uint32 + // Number of collision occur while obtaining device lock while transmitting. + CPUCollision uint32 + // Number of times cpu woken up received_rps. + ReceivedRps uint32 + // number of times flow limit has been reached. + FlowLimitCount uint32 + // Softnet backlog status. + SoftnetBacklogLen uint32 + // CPU id owning this softnet_data. + Index uint32 + // softnet_data's Width. + Width int } var softNetProcFile = "net/softnet_stat" @@ -66,22 +79,57 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { for s.Scan() { columns := strings.Fields(s.Text()) width := len(columns) + softnetStat := SoftnetStat{} if width < minColumns { return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) } - // We only parse the first three columns at the moment. - us, err := parseHexUint32s(columns[0:3]) - if err != nil { - return nil, err + // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 + if width >= minColumns { + us, err := parseHexUint32s(columns[0:9]) + if err != nil { + return nil, err + } + + softnetStat.Processed = us[0] + softnetStat.Dropped = us[1] + softnetStat.TimeSqueezed = us[2] + softnetStat.CPUCollision = us[8] + } + + // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 + if width >= 10 { + us, err := parseHexUint32s(columns[9:10]) + if err != nil { + return nil, err + } + + softnetStat.ReceivedRps = us[0] } - stats = append(stats, SoftnetStat{ - Processed: us[0], - Dropped: us[1], - TimeSqueezed: us[2], - }) + // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 + if width >= 11 { + us, err := parseHexUint32s(columns[10:11]) + if err != nil { + return nil, err + } + + softnetStat.FlowLimitCount = us[0] + } + + // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 + if width >= 13 { + us, err := parseHexUint32s(columns[11:13]) + if err != nil { + return nil, err + } + + softnetStat.SoftnetBacklogLen = us[0] + softnetStat.Index = us[1] + } + softnetStat.Width = width + stats = append(stats, softnetStat) } return stats, nil diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index dcea9c5a671..5cc40aef55b 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -15,6 +15,7 @@ package procfs import ( "bufio" + "io" "os" "path/filepath" "strconv" @@ -42,27 +43,43 @@ func (fs FS) NetStat() ([]NetStat, error) { return nil, err } - netStatFile := NetStat{ - Filename: filepath.Base(filePath), - Stats: make(map[string][]uint64), + procNetstat, err := parseNetstat(file) + if err != nil { + return nil, err + } + procNetstat.Filename = filepath.Base(filePath) + + netStatsTotal = append(netStatsTotal, procNetstat) + } + return netStatsTotal, nil +} + +// parseNetstat parses the metrics from `/proc/net/stat/` file +// and returns a NetStat structure. +func parseNetstat(r io.Reader) (NetStat, error) { + var ( + scanner = bufio.NewScanner(r) + netStat = NetStat{ + Stats: make(map[string][]uint64), } - scanner := bufio.NewScanner(file) - scanner.Scan() - // First string is always a header for stats - var headers []string - headers = append(headers, strings.Fields(scanner.Text())...) 
+ ) + + scanner.Scan() - // Other strings represent per-CPU counters - for scanner.Scan() { - for num, counter := range strings.Fields(scanner.Text()) { - value, err := strconv.ParseUint(counter, 16, 64) - if err != nil { - return nil, err - } - netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value) + // First string is always a header for stats + var headers []string + headers = append(headers, strings.Fields(scanner.Text())...) + + // Other strings represent per-CPU counters + for scanner.Scan() { + for num, counter := range strings.Fields(scanner.Text()) { + value, err := strconv.ParseUint(counter, 16, 64) + if err != nil { + return NetStat{}, err } + netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value) } - netStatsTotal = append(netStatsTotal, netStatFile) } - return netStatsTotal, nil + + return netStat, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index cca03327c3f..ea83a75ffc4 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a // specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go new file mode 100644 index 00000000000..9df79c23799 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -0,0 +1,98 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Interrupt represents a single interrupt line. +type Interrupt struct { + // Info is the type of interrupt. + Info string + // Devices is the name of the device that is located at that IRQ + Devices string + // Values is the number of interrupts per CPU. + Values []string +} + +// Interrupts models the content of /proc/interrupts. Key is the IRQ number. +// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts +// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output +type Interrupts map[string]Interrupt + +// Interrupts creates a new instance from a given Proc instance. 
+func (p Proc) Interrupts() (Interrupts, error) { + data, err := util.ReadFileNoStat(p.path("interrupts")) + if err != nil { + return nil, err + } + return parseInterrupts(bytes.NewReader(data)) +} + +func parseInterrupts(r io.Reader) (Interrupts, error) { + var ( + interrupts = Interrupts{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return nil, errors.New("interrupts empty") + } + cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + if len(parts) == 0 { // skip empty lines + continue + } + if len(parts) < 2 { + return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) + } + intName := parts[0][:len(parts[0])-1] // remove trailing : + + if len(parts) == 2 { + interrupts[intName] = Interrupt{ + Info: "", + Devices: "", + Values: []string{ + parts[1], + }, + } + continue + } + + intr := Interrupt{ + Values: parts[1 : cpuNum+1], + } + + if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt + intr.Info = parts[cpuNum+1] + intr.Devices = strings.Join(parts[cpuNum+2:], " ") + } else { + intr.Info = strings.Join(parts[cpuNum+1:], " ") + } + interrupts[intName] = intr + } + + return interrupts, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 48b5238194e..6a43bb24595 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -33,139 +33,140 @@ type ProcNetstat struct { } type TcpExt struct { // nolint:revive - SyncookiesSent float64 - SyncookiesRecv float64 - SyncookiesFailed float64 - EmbryonicRsts float64 - PruneCalled float64 - RcvPruned float64 - OfoPruned float64 - OutOfWindowIcmps float64 - LockDroppedIcmps float64 - ArpFilter float64 - TW float64 - TWRecycled float64 - TWKilled float64 - PAWSActive float64 - PAWSEstab float64 - DelayedACKs float64 - DelayedACKLocked float64 - DelayedACKLost float64 - ListenOverflows float64 - ListenDrops float64 - TCPHPHits float64 - TCPPureAcks float64 - TCPHPAcks float64 - TCPRenoRecovery float64 - TCPSackRecovery float64 - TCPSACKReneging float64 - TCPSACKReorder float64 - TCPRenoReorder float64 - TCPTSReorder float64 - TCPFullUndo float64 - TCPPartialUndo float64 - TCPDSACKUndo float64 - TCPLossUndo float64 - TCPLostRetransmit float64 - TCPRenoFailures float64 - TCPSackFailures float64 - TCPLossFailures float64 - TCPFastRetrans float64 - TCPSlowStartRetrans float64 - TCPTimeouts float64 - TCPLossProbes float64 - TCPLossProbeRecovery float64 - TCPRenoRecoveryFail float64 - TCPSackRecoveryFail float64 - TCPRcvCollapsed float64 - TCPDSACKOldSent float64 - TCPDSACKOfoSent float64 - TCPDSACKRecv float64 - TCPDSACKOfoRecv float64 - TCPAbortOnData float64 - TCPAbortOnClose float64 - TCPAbortOnMemory float64 - TCPAbortOnTimeout float64 - TCPAbortOnLinger float64 - TCPAbortFailed float64 - TCPMemoryPressures float64 - TCPMemoryPressuresChrono float64 - TCPSACKDiscard float64 - TCPDSACKIgnoredOld float64 - TCPDSACKIgnoredNoUndo float64 - TCPSpuriousRTOs float64 - TCPMD5NotFound float64 - TCPMD5Unexpected float64 - TCPMD5Failure float64 - TCPSackShifted float64 - TCPSackMerged float64 - TCPSackShiftFallback float64 - TCPBacklogDrop float64 - PFMemallocDrop float64 - TCPMinTTLDrop float64 - TCPDeferAcceptDrop float64 - IPReversePathFilter float64 - TCPTimeWaitOverflow float64 - TCPReqQFullDoCookies float64 - TCPReqQFullDrop 
float64 - TCPRetransFail float64 - TCPRcvCoalesce float64 - TCPOFOQueue float64 - TCPOFODrop float64 - TCPOFOMerge float64 - TCPChallengeACK float64 - TCPSYNChallenge float64 - TCPFastOpenActive float64 - TCPFastOpenActiveFail float64 - TCPFastOpenPassive float64 - TCPFastOpenPassiveFail float64 - TCPFastOpenListenOverflow float64 - TCPFastOpenCookieReqd float64 - TCPFastOpenBlackhole float64 - TCPSpuriousRtxHostQueues float64 - BusyPollRxPackets float64 - TCPAutoCorking float64 - TCPFromZeroWindowAdv float64 - TCPToZeroWindowAdv float64 - TCPWantZeroWindowAdv float64 - TCPSynRetrans float64 - TCPOrigDataSent float64 - TCPHystartTrainDetect float64 - TCPHystartTrainCwnd float64 - TCPHystartDelayDetect float64 - TCPHystartDelayCwnd float64 - TCPACKSkippedSynRecv float64 - TCPACKSkippedPAWS float64 - TCPACKSkippedSeq float64 - TCPACKSkippedFinWait2 float64 - TCPACKSkippedTimeWait float64 - TCPACKSkippedChallenge float64 - TCPWinProbe float64 - TCPKeepAlive float64 - TCPMTUPFail float64 - TCPMTUPSuccess float64 - TCPWqueueTooBig float64 + SyncookiesSent *float64 + SyncookiesRecv *float64 + SyncookiesFailed *float64 + EmbryonicRsts *float64 + PruneCalled *float64 + RcvPruned *float64 + OfoPruned *float64 + OutOfWindowIcmps *float64 + LockDroppedIcmps *float64 + ArpFilter *float64 + TW *float64 + TWRecycled *float64 + TWKilled *float64 + PAWSActive *float64 + PAWSEstab *float64 + DelayedACKs *float64 + DelayedACKLocked *float64 + DelayedACKLost *float64 + ListenOverflows *float64 + ListenDrops *float64 + TCPHPHits *float64 + TCPPureAcks *float64 + TCPHPAcks *float64 + TCPRenoRecovery *float64 + TCPSackRecovery *float64 + TCPSACKReneging *float64 + TCPSACKReorder *float64 + TCPRenoReorder *float64 + TCPTSReorder *float64 + TCPFullUndo *float64 + TCPPartialUndo *float64 + TCPDSACKUndo *float64 + TCPLossUndo *float64 + TCPLostRetransmit *float64 + TCPRenoFailures *float64 + TCPSackFailures *float64 + TCPLossFailures *float64 + TCPFastRetrans *float64 + TCPSlowStartRetrans *float64 + TCPTimeouts *float64 + TCPLossProbes *float64 + TCPLossProbeRecovery *float64 + TCPRenoRecoveryFail *float64 + TCPSackRecoveryFail *float64 + TCPRcvCollapsed *float64 + TCPDSACKOldSent *float64 + TCPDSACKOfoSent *float64 + TCPDSACKRecv *float64 + TCPDSACKOfoRecv *float64 + TCPAbortOnData *float64 + TCPAbortOnClose *float64 + TCPAbortOnMemory *float64 + TCPAbortOnTimeout *float64 + TCPAbortOnLinger *float64 + TCPAbortFailed *float64 + TCPMemoryPressures *float64 + TCPMemoryPressuresChrono *float64 + TCPSACKDiscard *float64 + TCPDSACKIgnoredOld *float64 + TCPDSACKIgnoredNoUndo *float64 + TCPSpuriousRTOs *float64 + TCPMD5NotFound *float64 + TCPMD5Unexpected *float64 + TCPMD5Failure *float64 + TCPSackShifted *float64 + TCPSackMerged *float64 + TCPSackShiftFallback *float64 + TCPBacklogDrop *float64 + PFMemallocDrop *float64 + TCPMinTTLDrop *float64 + TCPDeferAcceptDrop *float64 + IPReversePathFilter *float64 + TCPTimeWaitOverflow *float64 + TCPReqQFullDoCookies *float64 + TCPReqQFullDrop *float64 + TCPRetransFail *float64 + TCPRcvCoalesce *float64 + TCPRcvQDrop *float64 + TCPOFOQueue *float64 + TCPOFODrop *float64 + TCPOFOMerge *float64 + TCPChallengeACK *float64 + TCPSYNChallenge *float64 + TCPFastOpenActive *float64 + TCPFastOpenActiveFail *float64 + TCPFastOpenPassive *float64 + TCPFastOpenPassiveFail *float64 + TCPFastOpenListenOverflow *float64 + TCPFastOpenCookieReqd *float64 + TCPFastOpenBlackhole *float64 + TCPSpuriousRtxHostQueues *float64 + BusyPollRxPackets *float64 + TCPAutoCorking *float64 + 
TCPFromZeroWindowAdv *float64 + TCPToZeroWindowAdv *float64 + TCPWantZeroWindowAdv *float64 + TCPSynRetrans *float64 + TCPOrigDataSent *float64 + TCPHystartTrainDetect *float64 + TCPHystartTrainCwnd *float64 + TCPHystartDelayDetect *float64 + TCPHystartDelayCwnd *float64 + TCPACKSkippedSynRecv *float64 + TCPACKSkippedPAWS *float64 + TCPACKSkippedSeq *float64 + TCPACKSkippedFinWait2 *float64 + TCPACKSkippedTimeWait *float64 + TCPACKSkippedChallenge *float64 + TCPWinProbe *float64 + TCPKeepAlive *float64 + TCPMTUPFail *float64 + TCPMTUPSuccess *float64 + TCPWqueueTooBig *float64 } type IpExt struct { // nolint:revive - InNoRoutes float64 - InTruncatedPkts float64 - InMcastPkts float64 - OutMcastPkts float64 - InBcastPkts float64 - OutBcastPkts float64 - InOctets float64 - OutOctets float64 - InMcastOctets float64 - OutMcastOctets float64 - InBcastOctets float64 - OutBcastOctets float64 - InCsumErrors float64 - InNoECTPkts float64 - InECT1Pkts float64 - InECT0Pkts float64 - InCEPkts float64 - ReasmOverlaps float64 + InNoRoutes *float64 + InTruncatedPkts *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InBcastPkts *float64 + OutBcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InCsumErrors *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 + ReasmOverlaps *float64 } func (p Proc) Netstat() (ProcNetstat, error) { @@ -174,14 +175,14 @@ func (p Proc) Netstat() (ProcNetstat, error) { if err != nil { return ProcNetstat{PID: p.PID}, err } - procNetstat, err := parseNetstat(bytes.NewReader(data), filename) + procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename) procNetstat.PID = p.PID return procNetstat, err } -// parseNetstat parses the metrics from proc//net/netstat file +// parseProcNetstat parses the metrics from proc//net/netstat file // and returns a ProcNetstat structure. 
-func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { +func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { var ( scanner = bufio.NewScanner(r) procNetstat = ProcNetstat{} @@ -208,230 +209,232 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = value + procNetstat.TcpExt.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = value + procNetstat.TcpExt.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = value + procNetstat.TcpExt.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = value + procNetstat.TcpExt.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = value + procNetstat.TcpExt.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = value + procNetstat.TcpExt.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = value + procNetstat.TcpExt.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = value + procNetstat.TcpExt.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = value + procNetstat.TcpExt.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = value + procNetstat.TcpExt.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = value + procNetstat.TcpExt.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = value + procNetstat.TcpExt.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = value + procNetstat.TcpExt.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = value + procNetstat.TcpExt.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = value + procNetstat.TcpExt.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = value + procNetstat.TcpExt.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = value + procNetstat.TcpExt.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = value + procNetstat.TcpExt.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = value + procNetstat.TcpExt.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = value + procNetstat.TcpExt.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = value + procNetstat.TcpExt.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = value + procNetstat.TcpExt.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = value + procNetstat.TcpExt.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = value + procNetstat.TcpExt.TCPRenoRecovery = &value case "TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery = value + procNetstat.TcpExt.TCPSackRecovery = &value case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = value + procNetstat.TcpExt.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = value + procNetstat.TcpExt.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = value + procNetstat.TcpExt.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = value + procNetstat.TcpExt.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = value + procNetstat.TcpExt.TCPFullUndo = &value case "TCPPartialUndo": - 
procNetstat.TcpExt.TCPPartialUndo = value + procNetstat.TcpExt.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = value + procNetstat.TcpExt.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = value + procNetstat.TcpExt.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = value + procNetstat.TcpExt.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = value + procNetstat.TcpExt.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = value + procNetstat.TcpExt.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = value + procNetstat.TcpExt.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = value + procNetstat.TcpExt.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = value + procNetstat.TcpExt.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = value + procNetstat.TcpExt.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = value + procNetstat.TcpExt.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = value + procNetstat.TcpExt.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = value + procNetstat.TcpExt.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = value + procNetstat.TcpExt.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = value + procNetstat.TcpExt.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = value + procNetstat.TcpExt.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = value + procNetstat.TcpExt.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = value + procNetstat.TcpExt.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = value + procNetstat.TcpExt.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = value + procNetstat.TcpExt.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = value + procNetstat.TcpExt.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = value + procNetstat.TcpExt.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = value + procNetstat.TcpExt.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = value + procNetstat.TcpExt.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = value + procNetstat.TcpExt.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = value + procNetstat.TcpExt.TCPReqQFullDrop = &value case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = value + procNetstat.TcpExt.TCPRetransFail = &value case "TCPRcvCoalesce": - procNetstat.TcpExt.TCPRcvCoalesce = value + procNetstat.TcpExt.TCPRcvCoalesce = &value + case "TCPRcvQDrop": + procNetstat.TcpExt.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = value + procNetstat.TcpExt.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = value + procNetstat.TcpExt.TCPOFODrop = &value case "TCPOFOMerge": - 
procNetstat.TcpExt.TCPOFOMerge = value + procNetstat.TcpExt.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = value + procNetstat.TcpExt.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = value + procNetstat.TcpExt.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = value + procNetstat.TcpExt.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = value + procNetstat.TcpExt.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = value + procNetstat.TcpExt.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = value + procNetstat.TcpExt.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = value + procNetstat.TcpExt.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = value + procNetstat.TcpExt.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = value + procNetstat.TcpExt.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value + procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = value + procNetstat.TcpExt.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = value + procNetstat.TcpExt.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = value + procNetstat.TcpExt.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = value + procNetstat.TcpExt.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = value + procNetstat.TcpExt.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = value + procNetstat.TcpExt.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = value + procNetstat.TcpExt.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = value + procNetstat.TcpExt.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = value + procNetstat.TcpExt.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = value + procNetstat.TcpExt.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = value + procNetstat.TcpExt.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = value + procNetstat.TcpExt.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = value + procNetstat.TcpExt.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = value + procNetstat.TcpExt.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = value + procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value case "TCPACKSkippedTimeWait": - procNetstat.TcpExt.TCPACKSkippedTimeWait = value + procNetstat.TcpExt.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = value + procNetstat.TcpExt.TCPACKSkippedChallenge = &value case "TCPWinProbe": - 
procNetstat.TcpExt.TCPWinProbe = value + procNetstat.TcpExt.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = value + procNetstat.TcpExt.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = value + procNetstat.TcpExt.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = value + procNetstat.TcpExt.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = value + procNetstat.TcpExt.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = value + procNetstat.IpExt.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = value + procNetstat.IpExt.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = value + procNetstat.IpExt.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = value + procNetstat.IpExt.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = value + procNetstat.IpExt.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = value + procNetstat.IpExt.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = value + procNetstat.IpExt.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = value + procNetstat.IpExt.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = value + procNetstat.IpExt.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = value + procNetstat.IpExt.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = value + procNetstat.IpExt.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = value + procNetstat.IpExt.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = value + procNetstat.IpExt.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = value + procNetstat.IpExt.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = value + procNetstat.IpExt.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = value + procNetstat.IpExt.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = value + procNetstat.IpExt.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = value + procNetstat.IpExt.ReasmOverlaps = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index ae191896cbd..6c46b718849 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -37,100 +37,100 @@ type ProcSnmp struct { } type Ip struct { // nolint:revive - Forwarding float64 - DefaultTTL float64 - InReceives float64 - InHdrErrors float64 - InAddrErrors float64 - ForwDatagrams float64 - InUnknownProtos float64 - InDiscards float64 - InDelivers float64 - OutRequests float64 - OutDiscards float64 - OutNoRoutes float64 - ReasmTimeout float64 - ReasmReqds float64 - ReasmOKs float64 - ReasmFails float64 - FragOKs float64 - FragFails float64 - FragCreates float64 + Forwarding *float64 + DefaultTTL *float64 + InReceives *float64 + InHdrErrors *float64 + InAddrErrors *float64 + ForwDatagrams *float64 + InUnknownProtos *float64 + InDiscards *float64 + InDelivers *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + 
FragCreates *float64 } -type Icmp struct { - InMsgs float64 - InErrors float64 - InCsumErrors float64 - InDestUnreachs float64 - InTimeExcds float64 - InParmProbs float64 - InSrcQuenchs float64 - InRedirects float64 - InEchos float64 - InEchoReps float64 - InTimestamps float64 - InTimestampReps float64 - InAddrMasks float64 - InAddrMaskReps float64 - OutMsgs float64 - OutErrors float64 - OutDestUnreachs float64 - OutTimeExcds float64 - OutParmProbs float64 - OutSrcQuenchs float64 - OutRedirects float64 - OutEchos float64 - OutEchoReps float64 - OutTimestamps float64 - OutTimestampReps float64 - OutAddrMasks float64 - OutAddrMaskReps float64 +type Icmp struct { // nolint:revive + InMsgs *float64 + InErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InTimeExcds *float64 + InParmProbs *float64 + InSrcQuenchs *float64 + InRedirects *float64 + InEchos *float64 + InEchoReps *float64 + InTimestamps *float64 + InTimestampReps *float64 + InAddrMasks *float64 + InAddrMaskReps *float64 + OutMsgs *float64 + OutErrors *float64 + OutDestUnreachs *float64 + OutTimeExcds *float64 + OutParmProbs *float64 + OutSrcQuenchs *float64 + OutRedirects *float64 + OutEchos *float64 + OutEchoReps *float64 + OutTimestamps *float64 + OutTimestampReps *float64 + OutAddrMasks *float64 + OutAddrMaskReps *float64 } type IcmpMsg struct { - InType3 float64 - OutType3 float64 + InType3 *float64 + OutType3 *float64 } type Tcp struct { // nolint:revive - RtoAlgorithm float64 - RtoMin float64 - RtoMax float64 - MaxConn float64 - ActiveOpens float64 - PassiveOpens float64 - AttemptFails float64 - EstabResets float64 - CurrEstab float64 - InSegs float64 - OutSegs float64 - RetransSegs float64 - InErrs float64 - OutRsts float64 - InCsumErrors float64 + RtoAlgorithm *float64 + RtoMin *float64 + RtoMax *float64 + MaxConn *float64 + ActiveOpens *float64 + PassiveOpens *float64 + AttemptFails *float64 + EstabResets *float64 + CurrEstab *float64 + InSegs *float64 + OutSegs *float64 + RetransSegs *float64 + InErrs *float64 + OutRsts *float64 + InCsumErrors *float64 } type Udp struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } type UdpLite struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } func (p Proc) Snmp() (ProcSnmp, error) { @@ -173,178 +173,178 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = value + procSnmp.Ip.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = value + procSnmp.Ip.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = value + procSnmp.Ip.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = value + procSnmp.Ip.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = value + procSnmp.Ip.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = value + procSnmp.Ip.ForwDatagrams = &value 
case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = value + procSnmp.Ip.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = value + procSnmp.Ip.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = value + procSnmp.Ip.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = value + procSnmp.Ip.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = value + procSnmp.Ip.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = value + procSnmp.Ip.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = value + procSnmp.Ip.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = value + procSnmp.Ip.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = value + procSnmp.Ip.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = value + procSnmp.Ip.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = value + procSnmp.Ip.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = value + procSnmp.Ip.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = value + procSnmp.Ip.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = value + procSnmp.Icmp.InMsgs = &value case "InErrors": - procSnmp.Icmp.InErrors = value + procSnmp.Icmp.InErrors = &value case "InCsumErrors": - procSnmp.Icmp.InCsumErrors = value + procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = value + procSnmp.Icmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = value + procSnmp.Icmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = value + procSnmp.Icmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = value + procSnmp.Icmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = value + procSnmp.Icmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = value + procSnmp.Icmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = value + procSnmp.Icmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = value + procSnmp.Icmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = value + procSnmp.Icmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = value + procSnmp.Icmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = value + procSnmp.Icmp.InAddrMaskReps = &value case "OutMsgs": - procSnmp.Icmp.OutMsgs = value + procSnmp.Icmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = value + procSnmp.Icmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = value + procSnmp.Icmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = value + procSnmp.Icmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = value + procSnmp.Icmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = value + procSnmp.Icmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = value + procSnmp.Icmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = value + procSnmp.Icmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = value + procSnmp.Icmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = value + procSnmp.Icmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = value + procSnmp.Icmp.OutTimestampReps = &value case "OutAddrMasks": - 
procSnmp.Icmp.OutAddrMasks = value + procSnmp.Icmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = value + procSnmp.Icmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = value + procSnmp.IcmpMsg.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = value + procSnmp.IcmpMsg.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = value + procSnmp.Tcp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = value + procSnmp.Tcp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = value + procSnmp.Tcp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = value + procSnmp.Tcp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = value + procSnmp.Tcp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = value + procSnmp.Tcp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = value + procSnmp.Tcp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = value + procSnmp.Tcp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = value + procSnmp.Tcp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = value + procSnmp.Tcp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = value + procSnmp.Tcp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = value + procSnmp.Tcp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = value + procSnmp.Tcp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = value + procSnmp.Tcp.OutRsts = &value case "InCsumErrors": - procSnmp.Tcp.InCsumErrors = value + procSnmp.Tcp.InCsumErrors = &value } case "Udp": switch key { case "InDatagrams": - procSnmp.Udp.InDatagrams = value + procSnmp.Udp.InDatagrams = &value case "NoPorts": - procSnmp.Udp.NoPorts = value + procSnmp.Udp.NoPorts = &value case "InErrors": - procSnmp.Udp.InErrors = value + procSnmp.Udp.InErrors = &value case "OutDatagrams": - procSnmp.Udp.OutDatagrams = value + procSnmp.Udp.OutDatagrams = &value case "RcvbufErrors": - procSnmp.Udp.RcvbufErrors = value + procSnmp.Udp.RcvbufErrors = &value case "SndbufErrors": - procSnmp.Udp.SndbufErrors = value + procSnmp.Udp.SndbufErrors = &value case "InCsumErrors": - procSnmp.Udp.InCsumErrors = value + procSnmp.Udp.InCsumErrors = &value case "IgnoredMulti": - procSnmp.Udp.IgnoredMulti = value + procSnmp.Udp.IgnoredMulti = &value } case "UdpLite": switch key { case "InDatagrams": - procSnmp.UdpLite.InDatagrams = value + procSnmp.UdpLite.InDatagrams = &value case "NoPorts": - procSnmp.UdpLite.NoPorts = value + procSnmp.UdpLite.NoPorts = &value case "InErrors": - procSnmp.UdpLite.InErrors = value + procSnmp.UdpLite.InErrors = &value case "OutDatagrams": - procSnmp.UdpLite.OutDatagrams = value + procSnmp.UdpLite.OutDatagrams = &value case "RcvbufErrors": - procSnmp.UdpLite.RcvbufErrors = value + procSnmp.UdpLite.RcvbufErrors = &value case "SndbufErrors": - procSnmp.UdpLite.SndbufErrors = value + procSnmp.UdpLite.SndbufErrors = &value case "InCsumErrors": - procSnmp.UdpLite.InCsumErrors = value + procSnmp.UdpLite.InCsumErrors = &value case "IgnoredMulti": - procSnmp.UdpLite.IgnoredMulti = value + procSnmp.UdpLite.IgnoredMulti = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go index f611992d52c..3059cc6a136 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -36,106 +36,106 @@ type 
ProcSnmp6 struct { } type Ip6 struct { // nolint:revive - InReceives float64 - InHdrErrors float64 - InTooBigErrors float64 - InNoRoutes float64 - InAddrErrors float64 - InUnknownProtos float64 - InTruncatedPkts float64 - InDiscards float64 - InDelivers float64 - OutForwDatagrams float64 - OutRequests float64 - OutDiscards float64 - OutNoRoutes float64 - ReasmTimeout float64 - ReasmReqds float64 - ReasmOKs float64 - ReasmFails float64 - FragOKs float64 - FragFails float64 - FragCreates float64 - InMcastPkts float64 - OutMcastPkts float64 - InOctets float64 - OutOctets float64 - InMcastOctets float64 - OutMcastOctets float64 - InBcastOctets float64 - OutBcastOctets float64 - InNoECTPkts float64 - InECT1Pkts float64 - InECT0Pkts float64 - InCEPkts float64 + InReceives *float64 + InHdrErrors *float64 + InTooBigErrors *float64 + InNoRoutes *float64 + InAddrErrors *float64 + InUnknownProtos *float64 + InTruncatedPkts *float64 + InDiscards *float64 + InDelivers *float64 + OutForwDatagrams *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + FragCreates *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 } type Icmp6 struct { - InMsgs float64 - InErrors float64 - OutMsgs float64 - OutErrors float64 - InCsumErrors float64 - InDestUnreachs float64 - InPktTooBigs float64 - InTimeExcds float64 - InParmProblems float64 - InEchos float64 - InEchoReplies float64 - InGroupMembQueries float64 - InGroupMembResponses float64 - InGroupMembReductions float64 - InRouterSolicits float64 - InRouterAdvertisements float64 - InNeighborSolicits float64 - InNeighborAdvertisements float64 - InRedirects float64 - InMLDv2Reports float64 - OutDestUnreachs float64 - OutPktTooBigs float64 - OutTimeExcds float64 - OutParmProblems float64 - OutEchos float64 - OutEchoReplies float64 - OutGroupMembQueries float64 - OutGroupMembResponses float64 - OutGroupMembReductions float64 - OutRouterSolicits float64 - OutRouterAdvertisements float64 - OutNeighborSolicits float64 - OutNeighborAdvertisements float64 - OutRedirects float64 - OutMLDv2Reports float64 - InType1 float64 - InType134 float64 - InType135 float64 - InType136 float64 - InType143 float64 - OutType133 float64 - OutType135 float64 - OutType136 float64 - OutType143 float64 + InMsgs *float64 + InErrors *float64 + OutMsgs *float64 + OutErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InPktTooBigs *float64 + InTimeExcds *float64 + InParmProblems *float64 + InEchos *float64 + InEchoReplies *float64 + InGroupMembQueries *float64 + InGroupMembResponses *float64 + InGroupMembReductions *float64 + InRouterSolicits *float64 + InRouterAdvertisements *float64 + InNeighborSolicits *float64 + InNeighborAdvertisements *float64 + InRedirects *float64 + InMLDv2Reports *float64 + OutDestUnreachs *float64 + OutPktTooBigs *float64 + OutTimeExcds *float64 + OutParmProblems *float64 + OutEchos *float64 + OutEchoReplies *float64 + OutGroupMembQueries *float64 + OutGroupMembResponses *float64 + OutGroupMembReductions *float64 + OutRouterSolicits *float64 + OutRouterAdvertisements *float64 + OutNeighborSolicits *float64 + OutNeighborAdvertisements *float64 + OutRedirects *float64 + 
OutMLDv2Reports *float64 + InType1 *float64 + InType134 *float64 + InType135 *float64 + InType136 *float64 + InType143 *float64 + OutType133 *float64 + OutType135 *float64 + OutType136 *float64 + OutType143 *float64 } type Udp6 struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } type UdpLite6 struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 } func (p Proc) Snmp6() (ProcSnmp6, error) { @@ -182,197 +182,197 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = value + procSnmp6.Ip6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = value + procSnmp6.Ip6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = value + procSnmp6.Ip6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = value + procSnmp6.Ip6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = value + procSnmp6.Ip6.InAddrErrors = &value case "InUnknownProtos": - procSnmp6.Ip6.InUnknownProtos = value + procSnmp6.Ip6.InUnknownProtos = &value case "InTruncatedPkts": - procSnmp6.Ip6.InTruncatedPkts = value + procSnmp6.Ip6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = value + procSnmp6.Ip6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = value + procSnmp6.Ip6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = value + procSnmp6.Ip6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = value + procSnmp6.Ip6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = value + procSnmp6.Ip6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = value + procSnmp6.Ip6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = value + procSnmp6.Ip6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = value + procSnmp6.Ip6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = value + procSnmp6.Ip6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = value + procSnmp6.Ip6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = value + procSnmp6.Ip6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = value + procSnmp6.Ip6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = value + procSnmp6.Ip6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = value + procSnmp6.Ip6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = value + procSnmp6.Ip6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = value + procSnmp6.Ip6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = value + procSnmp6.Ip6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = value + procSnmp6.Ip6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = value + procSnmp6.Ip6.OutMcastOctets = &value case 
"InBcastOctets": - procSnmp6.Ip6.InBcastOctets = value + procSnmp6.Ip6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = value + procSnmp6.Ip6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = value + procSnmp6.Ip6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = value + procSnmp6.Ip6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = value + procSnmp6.Ip6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = value + procSnmp6.Ip6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = value + procSnmp6.Icmp6.InMsgs = &value case "InErrors": - procSnmp6.Icmp6.InErrors = value + procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = value + procSnmp6.Icmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = value + procSnmp6.Icmp6.OutErrors = &value case "InCsumErrors": - procSnmp6.Icmp6.InCsumErrors = value + procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = value + procSnmp6.Icmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = value + procSnmp6.Icmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = value + procSnmp6.Icmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = value + procSnmp6.Icmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = value + procSnmp6.Icmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = value + procSnmp6.Icmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = value + procSnmp6.Icmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = value + procSnmp6.Icmp6.InGroupMembResponses = &value case "InGroupMembReductions": - procSnmp6.Icmp6.InGroupMembReductions = value + procSnmp6.Icmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = value + procSnmp6.Icmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = value + procSnmp6.Icmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = value + procSnmp6.Icmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = value + procSnmp6.Icmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = value + procSnmp6.Icmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = value + procSnmp6.Icmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = value + procSnmp6.Icmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = value + procSnmp6.Icmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = value + procSnmp6.Icmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = value + procSnmp6.Icmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = value + procSnmp6.Icmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = value + procSnmp6.Icmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = value + procSnmp6.Icmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = value + procSnmp6.Icmp6.OutGroupMembResponses = 
&value case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = value + procSnmp6.Icmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = value + procSnmp6.Icmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = value + procSnmp6.Icmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = value + procSnmp6.Icmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = value + procSnmp6.Icmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = value + procSnmp6.Icmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = value + procSnmp6.Icmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = value + procSnmp6.Icmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = value + procSnmp6.Icmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = value + procSnmp6.Icmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = value + procSnmp6.Icmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = value + procSnmp6.Icmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = value + procSnmp6.Icmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = value + procSnmp6.Icmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = value + procSnmp6.Icmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = value + procSnmp6.Icmp6.OutType143 = &value } case "Udp6": switch key { case "InDatagrams": - procSnmp6.Udp6.InDatagrams = value + procSnmp6.Udp6.InDatagrams = &value case "NoPorts": - procSnmp6.Udp6.NoPorts = value + procSnmp6.Udp6.NoPorts = &value case "InErrors": - procSnmp6.Udp6.InErrors = value + procSnmp6.Udp6.InErrors = &value case "OutDatagrams": - procSnmp6.Udp6.OutDatagrams = value + procSnmp6.Udp6.OutDatagrams = &value case "RcvbufErrors": - procSnmp6.Udp6.RcvbufErrors = value + procSnmp6.Udp6.RcvbufErrors = &value case "SndbufErrors": - procSnmp6.Udp6.SndbufErrors = value + procSnmp6.Udp6.SndbufErrors = &value case "InCsumErrors": - procSnmp6.Udp6.InCsumErrors = value + procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = value + procSnmp6.Udp6.IgnoredMulti = &value } case "UdpLite6": switch key { case "InDatagrams": - procSnmp6.UdpLite6.InDatagrams = value + procSnmp6.UdpLite6.InDatagrams = &value case "NoPorts": - procSnmp6.UdpLite6.NoPorts = value + procSnmp6.UdpLite6.NoPorts = &value case "InErrors": - procSnmp6.UdpLite6.InErrors = value + procSnmp6.UdpLite6.InErrors = &value case "OutDatagrams": - procSnmp6.UdpLite6.OutDatagrams = value + procSnmp6.UdpLite6.OutDatagrams = &value case "RcvbufErrors": - procSnmp6.UdpLite6.RcvbufErrors = value + procSnmp6.UdpLite6.RcvbufErrors = &value case "SndbufErrors": - procSnmp6.UdpLite6.SndbufErrors = value + procSnmp6.UdpLite6.SndbufErrors = &value case "InCsumErrors": - procSnmp6.UdpLite6.InCsumErrors = value + procSnmp6.UdpLite6.InCsumErrors = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 06c556ef962..b278eb2c2df 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -102,6 +102,8 @@ type ProcStat struct { RSS int // Soft limit in bytes on the 
rss of the process. RSSLimit uint64 + // CPU number last executed on. + Processor uint // Real-time scheduling priority, a number in the range 1 to 99 for processes // scheduled under a real-time policy, or 0, for non-real-time processes. RTPriority uint @@ -184,7 +186,7 @@ func (p Proc) Stat() (ProcStat, error) { &ignoreUint64, &ignoreUint64, &ignoreInt64, - &ignoreInt64, + &s.Processor, &s.RTPriority, &s.Policy, &s.DelayAcctBlkIOTicks, diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 594022ded48..3d8c06439a9 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -96,10 +96,10 @@ func (p Proc) NewStatus() (ProcStatus, error) { kv := strings.SplitN(line, ":", 2) // removes spaces - k := string(strings.TrimSpace(kv[0])) - v := string(strings.TrimSpace(kv[1])) + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) // removes "kB" - v = string(bytes.Trim([]byte(v), " kB")) + v = strings.TrimSuffix(v, " kB") // value to int when possible // we can skip error check here, 'cause vKBytes is not used when value is a string diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 33f97caa08d..586af48af9f 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -62,7 +62,7 @@ type Stat struct { // Summed up cpu statistics. CPUTotal CPUStat // Per-CPU statistics. - CPU []CPUStat + CPU map[int64]CPUStat // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. IRQTotal uint64 // Number of times a numbered IRQ was triggered. @@ -170,10 +170,23 @@ func (fs FS) Stat() (Stat, error) { if err != nil { return Stat{}, err } + procStat, err := parseStat(bytes.NewReader(data), fileName) + if err != nil { + return Stat{}, err + } + return procStat, nil +} - stat := Stat{} +// parseStat parses the metrics from /proc/[pid]/stat. +func parseStat(r io.Reader, fileName string) (Stat, error) { + var ( + scanner = bufio.NewScanner(r) + stat = Stat{ + CPU: make(map[int64]CPUStat), + } + err error + ) - scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -228,9 +241,6 @@ func (fs FS) Stat() (Stat, error) { if cpuID == -1 { stat.CPUTotal = cpuStat } else { - for int64(len(stat.CPU)) <= cpuID { - stat.CPU = append(stat.CPU, CPUStat{}) - } stat.CPU[cpuID] = cpuStat } } diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go new file mode 100644 index 00000000000..f08bfc769db --- /dev/null +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -0,0 +1,79 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "fmt" + "os" + "strconv" + + fsi "github.com/prometheus/procfs/internal/fs" +) + +// Provide access to /proc/PID/task/TID files, for thread specific values. Since +// such files have the same structure as /proc/PID/ ones, the data structures +// and the parsers for the latter may be reused. + +// AllThreads returns a list of all currently available threads under /proc/PID. +func AllThreads(pid int) (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllThreads(pid) +} + +// AllThreads returns a list of all currently available threads for PID. +func (fs FS) AllThreads(pid int) (Procs, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + d, err := os.Open(taskPath) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + } + + t := Procs{} + for _, n := range names { + tid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)}) + } + + return t, nil +} + +// Thread returns a process for a given PID, TID. +func (fs FS) Thread(pid, tid int) (Proc, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + if _, err := os.Stat(taskPath); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil +} + +// Thread returns a process for a given TID of Proc. +func (proc Proc) Thread(tid int) (Proc, error) { + tfs := fsi.FS(proc.path("task")) + if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: tfs}, nil +} diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index 20ceb77e2df..cdedcae996d 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -26,7 +26,9 @@ import ( ) // The VM interface is described at -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// // Each setting is exposed as a single file. // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array // and numa_zonelist_order (deprecated) which is a string. diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.pb.go index faf37ea31c0..1376ba7a8b6 100644 --- a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.pb.go +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.pb.go @@ -304,8 +304,8 @@ type BatchCreateEntryRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The entries to be created. The entry ID field is output only, and will - // be ignored here. + // The entries to be created. If no entry ID is provided, one will be + // generated. Entries []*types.Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` // An output mask indicating the entry fields set in the response. 
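Also for context (not part of the patch): the new thread.go above adds thread enumeration on top of the existing Proc parsers, since /proc/PID/task/TID files share the /proc/PID layout. A hedged sketch of how it might be used; the PID value is an assumption for the example:

// Hedged sketch, not code from this patch: listing the threads of a process.
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	pid := 1 // hypothetical PID; any running process works

	threads, err := procfs.AllThreads(pid)
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range threads {
		// Thread stat files have the same layout as /proc/PID/stat, so the
		// existing Proc.Stat parser is reused unchanged.
		st, err := t.Stat()
		if err != nil {
			continue
		}
		fmt.Printf("tid=%d comm=%s state=%s\n", t.PID, st.Comm, st.State)
	}
}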
OutputMask *types.EntryMask `protobuf:"bytes,2,opt,name=output_mask,json=outputMask,proto3" json:"output_mask,omitempty"` @@ -711,6 +711,196 @@ func (x *GetAuthorizedEntriesResponse) GetEntries() []*types.Entry { return nil } +type SyncAuthorizedEntriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An output mask indicating which fields are set in the response. + OutputMask *types.EntryMask `protobuf:"bytes,1,opt,name=output_mask,json=outputMask,proto3" json:"output_mask,omitempty"` + // IDs of the entries to fetch in full. Sent by the client in response to + // a sparse entry. + Ids []string `protobuf:"bytes,2,rep,name=ids,proto3" json:"ids,omitempty"` +} + +func (x *SyncAuthorizedEntriesRequest) Reset() { + *x = SyncAuthorizedEntriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyncAuthorizedEntriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncAuthorizedEntriesRequest) ProtoMessage() {} + +func (x *SyncAuthorizedEntriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncAuthorizedEntriesRequest.ProtoReflect.Descriptor instead. +func (*SyncAuthorizedEntriesRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{13} +} + +func (x *SyncAuthorizedEntriesRequest) GetOutputMask() *types.EntryMask { + if x != nil { + return x.OutputMask + } + return nil +} + +func (x *SyncAuthorizedEntriesRequest) GetIds() []string { + if x != nil { + return x.Ids + } + return nil +} + +type SyncAuthorizedEntriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The revisions of the authorized entries. This field is set when the + // authorized entry list meets or exceeds the server-determined page size. + // Callers use it to determine which entries are new/updated that they then + // request on the stream. + // See SyncAuthorizedEntries for details. + EntryRevisions []*EntryRevision `protobuf:"bytes,1,rep,name=entry_revisions,json=entryRevisions,proto3" json:"entry_revisions,omitempty"` + // The authorized entries. This field is set either 1) on the initial + // response if the number of authorized entries is less than the page size + // or 2) in response to the caller requesting the entries after determining + // they need to details based on entry revisions provided in a previous + // response. + // See SyncAuthorizedEntries for details. + Entries []*types.Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"` + // Whether there are more entries to sync down in this response phase. 
+ More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` +} + +func (x *SyncAuthorizedEntriesResponse) Reset() { + *x = SyncAuthorizedEntriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyncAuthorizedEntriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncAuthorizedEntriesResponse) ProtoMessage() {} + +func (x *SyncAuthorizedEntriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncAuthorizedEntriesResponse.ProtoReflect.Descriptor instead. +func (*SyncAuthorizedEntriesResponse) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{14} +} + +func (x *SyncAuthorizedEntriesResponse) GetEntryRevisions() []*EntryRevision { + if x != nil { + return x.EntryRevisions + } + return nil +} + +func (x *SyncAuthorizedEntriesResponse) GetEntries() []*types.Entry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *SyncAuthorizedEntriesResponse) GetMore() bool { + if x != nil { + return x.More + } + return false +} + +type EntryRevision struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The entry ID. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // The entry revision number. + RevisionNumber int64 `protobuf:"varint,2,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"` +} + +func (x *EntryRevision) Reset() { + *x = EntryRevision{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EntryRevision) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntryRevision) ProtoMessage() {} + +func (x *EntryRevision) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntryRevision.ProtoReflect.Descriptor instead. 
+func (*EntryRevision) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{15} +} + +func (x *EntryRevision) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *EntryRevision) GetRevisionNumber() int64 { + if x != nil { + return x.RevisionNumber + } + return 0 +} + type ListEntriesRequest_Filter struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -726,7 +916,7 @@ type ListEntriesRequest_Filter struct { func (x *ListEntriesRequest_Filter) Reset() { *x = ListEntriesRequest_Filter{} if protoimpl.UnsafeEnabled { - mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[13] + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -739,7 +929,7 @@ func (x *ListEntriesRequest_Filter) String() string { func (*ListEntriesRequest_Filter) ProtoMessage() {} func (x *ListEntriesRequest_Filter) ProtoReflect() protoreflect.Message { - mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[13] + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -809,7 +999,7 @@ type BatchCreateEntryResponse_Result struct { func (x *BatchCreateEntryResponse_Result) Reset() { *x = BatchCreateEntryResponse_Result{} if protoimpl.UnsafeEnabled { - mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[14] + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -822,7 +1012,7 @@ func (x *BatchCreateEntryResponse_Result) String() string { func (*BatchCreateEntryResponse_Result) ProtoMessage() {} func (x *BatchCreateEntryResponse_Result) ProtoReflect() protoreflect.Message { - mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[14] + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -868,7 +1058,7 @@ type BatchUpdateEntryResponse_Result struct { func (x *BatchUpdateEntryResponse_Result) Reset() { *x = BatchUpdateEntryResponse_Result{} if protoimpl.UnsafeEnabled { - mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[15] + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -881,7 +1071,7 @@ func (x *BatchUpdateEntryResponse_Result) String() string { func (*BatchUpdateEntryResponse_Result) ProtoMessage() {} func (x *BatchUpdateEntryResponse_Result) ProtoReflect() protoreflect.Message { - mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[15] + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -925,7 +1115,7 @@ type BatchDeleteEntryResponse_Result struct { func (x *BatchDeleteEntryResponse_Result) Reset() { *x = BatchDeleteEntryResponse_Result{} if protoimpl.UnsafeEnabled { - mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[16] + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -938,7 +1128,7 @@ func (x *BatchDeleteEntryResponse_Result) String() string { func (*BatchDeleteEntryResponse_Result) 
ProtoMessage() {} func (x *BatchDeleteEntryResponse_Result) ProtoReflect() protoreflect.Message { - mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[16] + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1113,64 +1303,96 @@ var file_spire_api_server_entry_v1_entry_proto_rawDesc = []byte{ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x32, 0xb7, 0x06, 0x0a, 0x05, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x6f, 0x0a, 0x0c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, - 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, - 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2e, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x4e, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x2a, 0x2e, - 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x7b, 0x0a, 0x10, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x32, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, + 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0x6d, 0x0a, 0x1c, 0x53, 0x79, 0x6e, 0x63, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 
0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x1d, 0x53, 0x79, 0x6e, 0x63, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0f, 0x65, 0x6e, 0x74, + 0x72, 0x79, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x07, + 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x6d, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x6d, 0x6f, + 0x72, 0x65, 0x22, 0x48, 0x0a, 0x0d, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x76, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x32, 0xc8, 0x07, 0x0a, + 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x6f, 0x0a, 0x0c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x45, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x45, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, - 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x73, 0x70, 0x69, 0x72, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, + 
0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x7b, 0x0a, 0x10, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x32, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, - 0x0a, 0x10, 0x42, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x32, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, - 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x10, 0x42, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x32, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x10, 0x42, - 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x32, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, - 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x73, 0x70, 0x69, + 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x7b, 0x0a, 0x10, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x32, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x87, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, - 0x73, 0x12, 0x36, 0x2e, 0x73, 0x70, 
0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, - 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, - 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x73, 0x70, 0x69, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x87, 0x01, 0x0a, + 0x14, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x36, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x15, 0x53, 0x79, 0x6e, 0x63, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x12, 0x37, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, + 0x63, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, - 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x42, 0x49, 0x5a, 0x47, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, - 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, - 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x65, 0x6e, 0x74, - 0x72, 0x79, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x76, 0x31, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x49, 0x5a, 0x47, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, + 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x2f, 0x65, 0x6e, 0x74, 
0x72, 0x79, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1185,7 +1407,7 @@ func file_spire_api_server_entry_v1_entry_proto_rawDescGZIP() []byte { return file_spire_api_server_entry_v1_entry_proto_rawDescData } -var file_spire_api_server_entry_v1_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_spire_api_server_entry_v1_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 20) var file_spire_api_server_entry_v1_entry_proto_goTypes = []interface{}{ (*CountEntriesRequest)(nil), // 0: spire.api.server.entry.v1.CountEntriesRequest (*CountEntriesResponse)(nil), // 1: spire.api.server.entry.v1.CountEntriesResponse @@ -1200,62 +1422,70 @@ var file_spire_api_server_entry_v1_entry_proto_goTypes = []interface{}{ (*BatchDeleteEntryResponse)(nil), // 10: spire.api.server.entry.v1.BatchDeleteEntryResponse (*GetAuthorizedEntriesRequest)(nil), // 11: spire.api.server.entry.v1.GetAuthorizedEntriesRequest (*GetAuthorizedEntriesResponse)(nil), // 12: spire.api.server.entry.v1.GetAuthorizedEntriesResponse - (*ListEntriesRequest_Filter)(nil), // 13: spire.api.server.entry.v1.ListEntriesRequest.Filter - (*BatchCreateEntryResponse_Result)(nil), // 14: spire.api.server.entry.v1.BatchCreateEntryResponse.Result - (*BatchUpdateEntryResponse_Result)(nil), // 15: spire.api.server.entry.v1.BatchUpdateEntryResponse.Result - (*BatchDeleteEntryResponse_Result)(nil), // 16: spire.api.server.entry.v1.BatchDeleteEntryResponse.Result - (*types.EntryMask)(nil), // 17: spire.api.types.EntryMask - (*types.Entry)(nil), // 18: spire.api.types.Entry - (*types.SPIFFEID)(nil), // 19: spire.api.types.SPIFFEID - (*types.SelectorMatch)(nil), // 20: spire.api.types.SelectorMatch - (*types.FederatesWithMatch)(nil), // 21: spire.api.types.FederatesWithMatch - (*wrapperspb.StringValue)(nil), // 22: google.protobuf.StringValue - (*types.Status)(nil), // 23: spire.api.types.Status + (*SyncAuthorizedEntriesRequest)(nil), // 13: spire.api.server.entry.v1.SyncAuthorizedEntriesRequest + (*SyncAuthorizedEntriesResponse)(nil), // 14: spire.api.server.entry.v1.SyncAuthorizedEntriesResponse + (*EntryRevision)(nil), // 15: spire.api.server.entry.v1.EntryRevision + (*ListEntriesRequest_Filter)(nil), // 16: spire.api.server.entry.v1.ListEntriesRequest.Filter + (*BatchCreateEntryResponse_Result)(nil), // 17: spire.api.server.entry.v1.BatchCreateEntryResponse.Result + (*BatchUpdateEntryResponse_Result)(nil), // 18: spire.api.server.entry.v1.BatchUpdateEntryResponse.Result + (*BatchDeleteEntryResponse_Result)(nil), // 19: spire.api.server.entry.v1.BatchDeleteEntryResponse.Result + (*types.EntryMask)(nil), // 20: spire.api.types.EntryMask + (*types.Entry)(nil), // 21: spire.api.types.Entry + (*types.SPIFFEID)(nil), // 22: spire.api.types.SPIFFEID + (*types.SelectorMatch)(nil), // 23: spire.api.types.SelectorMatch + (*types.FederatesWithMatch)(nil), // 24: spire.api.types.FederatesWithMatch + (*wrapperspb.StringValue)(nil), // 25: google.protobuf.StringValue + (*types.Status)(nil), // 26: spire.api.types.Status } var file_spire_api_server_entry_v1_entry_proto_depIdxs = []int32{ - 13, // 0: spire.api.server.entry.v1.ListEntriesRequest.filter:type_name -> spire.api.server.entry.v1.ListEntriesRequest.Filter - 17, // 1: spire.api.server.entry.v1.ListEntriesRequest.output_mask:type_name -> spire.api.types.EntryMask - 18, // 2: spire.api.server.entry.v1.ListEntriesResponse.entries:type_name -> spire.api.types.Entry - 17, // 3: 
spire.api.server.entry.v1.GetEntryRequest.output_mask:type_name -> spire.api.types.EntryMask - 18, // 4: spire.api.server.entry.v1.BatchCreateEntryRequest.entries:type_name -> spire.api.types.Entry - 17, // 5: spire.api.server.entry.v1.BatchCreateEntryRequest.output_mask:type_name -> spire.api.types.EntryMask - 14, // 6: spire.api.server.entry.v1.BatchCreateEntryResponse.results:type_name -> spire.api.server.entry.v1.BatchCreateEntryResponse.Result - 18, // 7: spire.api.server.entry.v1.BatchUpdateEntryRequest.entries:type_name -> spire.api.types.Entry - 17, // 8: spire.api.server.entry.v1.BatchUpdateEntryRequest.input_mask:type_name -> spire.api.types.EntryMask - 17, // 9: spire.api.server.entry.v1.BatchUpdateEntryRequest.output_mask:type_name -> spire.api.types.EntryMask - 15, // 10: spire.api.server.entry.v1.BatchUpdateEntryResponse.results:type_name -> spire.api.server.entry.v1.BatchUpdateEntryResponse.Result - 16, // 11: spire.api.server.entry.v1.BatchDeleteEntryResponse.results:type_name -> spire.api.server.entry.v1.BatchDeleteEntryResponse.Result - 17, // 12: spire.api.server.entry.v1.GetAuthorizedEntriesRequest.output_mask:type_name -> spire.api.types.EntryMask - 18, // 13: spire.api.server.entry.v1.GetAuthorizedEntriesResponse.entries:type_name -> spire.api.types.Entry - 19, // 14: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_spiffe_id:type_name -> spire.api.types.SPIFFEID - 19, // 15: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_parent_id:type_name -> spire.api.types.SPIFFEID - 20, // 16: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_selectors:type_name -> spire.api.types.SelectorMatch - 21, // 17: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_federates_with:type_name -> spire.api.types.FederatesWithMatch - 22, // 18: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_hint:type_name -> google.protobuf.StringValue - 23, // 19: spire.api.server.entry.v1.BatchCreateEntryResponse.Result.status:type_name -> spire.api.types.Status - 18, // 20: spire.api.server.entry.v1.BatchCreateEntryResponse.Result.entry:type_name -> spire.api.types.Entry - 23, // 21: spire.api.server.entry.v1.BatchUpdateEntryResponse.Result.status:type_name -> spire.api.types.Status - 18, // 22: spire.api.server.entry.v1.BatchUpdateEntryResponse.Result.entry:type_name -> spire.api.types.Entry - 23, // 23: spire.api.server.entry.v1.BatchDeleteEntryResponse.Result.status:type_name -> spire.api.types.Status - 0, // 24: spire.api.server.entry.v1.Entry.CountEntries:input_type -> spire.api.server.entry.v1.CountEntriesRequest - 2, // 25: spire.api.server.entry.v1.Entry.ListEntries:input_type -> spire.api.server.entry.v1.ListEntriesRequest - 4, // 26: spire.api.server.entry.v1.Entry.GetEntry:input_type -> spire.api.server.entry.v1.GetEntryRequest - 5, // 27: spire.api.server.entry.v1.Entry.BatchCreateEntry:input_type -> spire.api.server.entry.v1.BatchCreateEntryRequest - 7, // 28: spire.api.server.entry.v1.Entry.BatchUpdateEntry:input_type -> spire.api.server.entry.v1.BatchUpdateEntryRequest - 9, // 29: spire.api.server.entry.v1.Entry.BatchDeleteEntry:input_type -> spire.api.server.entry.v1.BatchDeleteEntryRequest - 11, // 30: spire.api.server.entry.v1.Entry.GetAuthorizedEntries:input_type -> spire.api.server.entry.v1.GetAuthorizedEntriesRequest - 1, // 31: spire.api.server.entry.v1.Entry.CountEntries:output_type -> spire.api.server.entry.v1.CountEntriesResponse - 3, // 32: spire.api.server.entry.v1.Entry.ListEntries:output_type -> spire.api.server.entry.v1.ListEntriesResponse - 18, 
// 33: spire.api.server.entry.v1.Entry.GetEntry:output_type -> spire.api.types.Entry - 6, // 34: spire.api.server.entry.v1.Entry.BatchCreateEntry:output_type -> spire.api.server.entry.v1.BatchCreateEntryResponse - 8, // 35: spire.api.server.entry.v1.Entry.BatchUpdateEntry:output_type -> spire.api.server.entry.v1.BatchUpdateEntryResponse - 10, // 36: spire.api.server.entry.v1.Entry.BatchDeleteEntry:output_type -> spire.api.server.entry.v1.BatchDeleteEntryResponse - 12, // 37: spire.api.server.entry.v1.Entry.GetAuthorizedEntries:output_type -> spire.api.server.entry.v1.GetAuthorizedEntriesResponse - 31, // [31:38] is the sub-list for method output_type - 24, // [24:31] is the sub-list for method input_type - 24, // [24:24] is the sub-list for extension type_name - 24, // [24:24] is the sub-list for extension extendee - 0, // [0:24] is the sub-list for field type_name + 16, // 0: spire.api.server.entry.v1.ListEntriesRequest.filter:type_name -> spire.api.server.entry.v1.ListEntriesRequest.Filter + 20, // 1: spire.api.server.entry.v1.ListEntriesRequest.output_mask:type_name -> spire.api.types.EntryMask + 21, // 2: spire.api.server.entry.v1.ListEntriesResponse.entries:type_name -> spire.api.types.Entry + 20, // 3: spire.api.server.entry.v1.GetEntryRequest.output_mask:type_name -> spire.api.types.EntryMask + 21, // 4: spire.api.server.entry.v1.BatchCreateEntryRequest.entries:type_name -> spire.api.types.Entry + 20, // 5: spire.api.server.entry.v1.BatchCreateEntryRequest.output_mask:type_name -> spire.api.types.EntryMask + 17, // 6: spire.api.server.entry.v1.BatchCreateEntryResponse.results:type_name -> spire.api.server.entry.v1.BatchCreateEntryResponse.Result + 21, // 7: spire.api.server.entry.v1.BatchUpdateEntryRequest.entries:type_name -> spire.api.types.Entry + 20, // 8: spire.api.server.entry.v1.BatchUpdateEntryRequest.input_mask:type_name -> spire.api.types.EntryMask + 20, // 9: spire.api.server.entry.v1.BatchUpdateEntryRequest.output_mask:type_name -> spire.api.types.EntryMask + 18, // 10: spire.api.server.entry.v1.BatchUpdateEntryResponse.results:type_name -> spire.api.server.entry.v1.BatchUpdateEntryResponse.Result + 19, // 11: spire.api.server.entry.v1.BatchDeleteEntryResponse.results:type_name -> spire.api.server.entry.v1.BatchDeleteEntryResponse.Result + 20, // 12: spire.api.server.entry.v1.GetAuthorizedEntriesRequest.output_mask:type_name -> spire.api.types.EntryMask + 21, // 13: spire.api.server.entry.v1.GetAuthorizedEntriesResponse.entries:type_name -> spire.api.types.Entry + 20, // 14: spire.api.server.entry.v1.SyncAuthorizedEntriesRequest.output_mask:type_name -> spire.api.types.EntryMask + 15, // 15: spire.api.server.entry.v1.SyncAuthorizedEntriesResponse.entry_revisions:type_name -> spire.api.server.entry.v1.EntryRevision + 21, // 16: spire.api.server.entry.v1.SyncAuthorizedEntriesResponse.entries:type_name -> spire.api.types.Entry + 22, // 17: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_spiffe_id:type_name -> spire.api.types.SPIFFEID + 22, // 18: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_parent_id:type_name -> spire.api.types.SPIFFEID + 23, // 19: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_selectors:type_name -> spire.api.types.SelectorMatch + 24, // 20: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_federates_with:type_name -> spire.api.types.FederatesWithMatch + 25, // 21: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_hint:type_name -> google.protobuf.StringValue + 26, // 22: 
spire.api.server.entry.v1.BatchCreateEntryResponse.Result.status:type_name -> spire.api.types.Status + 21, // 23: spire.api.server.entry.v1.BatchCreateEntryResponse.Result.entry:type_name -> spire.api.types.Entry + 26, // 24: spire.api.server.entry.v1.BatchUpdateEntryResponse.Result.status:type_name -> spire.api.types.Status + 21, // 25: spire.api.server.entry.v1.BatchUpdateEntryResponse.Result.entry:type_name -> spire.api.types.Entry + 26, // 26: spire.api.server.entry.v1.BatchDeleteEntryResponse.Result.status:type_name -> spire.api.types.Status + 0, // 27: spire.api.server.entry.v1.Entry.CountEntries:input_type -> spire.api.server.entry.v1.CountEntriesRequest + 2, // 28: spire.api.server.entry.v1.Entry.ListEntries:input_type -> spire.api.server.entry.v1.ListEntriesRequest + 4, // 29: spire.api.server.entry.v1.Entry.GetEntry:input_type -> spire.api.server.entry.v1.GetEntryRequest + 5, // 30: spire.api.server.entry.v1.Entry.BatchCreateEntry:input_type -> spire.api.server.entry.v1.BatchCreateEntryRequest + 7, // 31: spire.api.server.entry.v1.Entry.BatchUpdateEntry:input_type -> spire.api.server.entry.v1.BatchUpdateEntryRequest + 9, // 32: spire.api.server.entry.v1.Entry.BatchDeleteEntry:input_type -> spire.api.server.entry.v1.BatchDeleteEntryRequest + 11, // 33: spire.api.server.entry.v1.Entry.GetAuthorizedEntries:input_type -> spire.api.server.entry.v1.GetAuthorizedEntriesRequest + 13, // 34: spire.api.server.entry.v1.Entry.SyncAuthorizedEntries:input_type -> spire.api.server.entry.v1.SyncAuthorizedEntriesRequest + 1, // 35: spire.api.server.entry.v1.Entry.CountEntries:output_type -> spire.api.server.entry.v1.CountEntriesResponse + 3, // 36: spire.api.server.entry.v1.Entry.ListEntries:output_type -> spire.api.server.entry.v1.ListEntriesResponse + 21, // 37: spire.api.server.entry.v1.Entry.GetEntry:output_type -> spire.api.types.Entry + 6, // 38: spire.api.server.entry.v1.Entry.BatchCreateEntry:output_type -> spire.api.server.entry.v1.BatchCreateEntryResponse + 8, // 39: spire.api.server.entry.v1.Entry.BatchUpdateEntry:output_type -> spire.api.server.entry.v1.BatchUpdateEntryResponse + 10, // 40: spire.api.server.entry.v1.Entry.BatchDeleteEntry:output_type -> spire.api.server.entry.v1.BatchDeleteEntryResponse + 12, // 41: spire.api.server.entry.v1.Entry.GetAuthorizedEntries:output_type -> spire.api.server.entry.v1.GetAuthorizedEntriesResponse + 14, // 42: spire.api.server.entry.v1.Entry.SyncAuthorizedEntries:output_type -> spire.api.server.entry.v1.SyncAuthorizedEntriesResponse + 35, // [35:43] is the sub-list for method output_type + 27, // [27:35] is the sub-list for method input_type + 27, // [27:27] is the sub-list for extension type_name + 27, // [27:27] is the sub-list for extension extendee + 0, // [0:27] is the sub-list for field type_name } func init() { file_spire_api_server_entry_v1_entry_proto_init() } @@ -1421,7 +1651,7 @@ func file_spire_api_server_entry_v1_entry_proto_init() { } } file_spire_api_server_entry_v1_entry_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListEntriesRequest_Filter); i { + switch v := v.(*SyncAuthorizedEntriesRequest); i { case 0: return &v.state case 1: @@ -1433,7 +1663,7 @@ func file_spire_api_server_entry_v1_entry_proto_init() { } } file_spire_api_server_entry_v1_entry_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BatchCreateEntryResponse_Result); i { + switch v := v.(*SyncAuthorizedEntriesResponse); i { case 0: return &v.state case 1: @@ -1445,7 +1675,7 @@ func 
file_spire_api_server_entry_v1_entry_proto_init() { } } file_spire_api_server_entry_v1_entry_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BatchUpdateEntryResponse_Result); i { + switch v := v.(*EntryRevision); i { case 0: return &v.state case 1: @@ -1457,6 +1687,42 @@ func file_spire_api_server_entry_v1_entry_proto_init() { } } file_spire_api_server_entry_v1_entry_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesRequest_Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchCreateEntryResponse_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchUpdateEntryResponse_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BatchDeleteEntryResponse_Result); i { case 0: return &v.state @@ -1475,7 +1741,7 @@ func file_spire_api_server_entry_v1_entry_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_spire_api_server_entry_v1_entry_proto_rawDesc, NumEnums: 0, - NumMessages: 17, + NumMessages: 20, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.proto index 3445e8df2fd..aab4f13b828 100644 --- a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.proto +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.proto @@ -46,6 +46,25 @@ service Entry { // The caller must present an active agent X509-SVID. See the Agent // AttestAgent/RenewAgent RPCs. rpc GetAuthorizedEntries(GetAuthorizedEntriesRequest) returns (GetAuthorizedEntriesResponse); + + // Syncs authorized entries down to the caller. The caller controls which + // entries the server sends down full details for. The flow is as follows: + // 1. Caller opens up sync stream + // 2. Server determines authorized entries for caller: + // - If there are less entries than a server-determined page size, go to (5). + // - Otherwise, go to (3). + // 3. Server pages entry revisions to the caller (contains the entry ID and + // revision number). The "more" flag set for all pages but the last so + // that the caller knows when the server is done. + // 4. Client determines which entries are new or updated (based on revision + // number) and asks for them by sending a request with the IDs. + // 5. Server pages down entries to the caller for each ID identified in (4) + // or every entry in (2) if the number of entries was less than the + // server-determined page size. The "more" flag set for all pages but + // the last so that the caller knows when the server is done. + // 6. Steps (4) and (5) are repeated until the caller has synced down the + // details for all new/updated entries and closes the stream. 
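From the caller's side, the flow above maps onto the generated streaming client roughly as follows. This is a hedged sketch, not SDK code: the function name, revision caching, and error handling are simplifications introduced for illustration.

// Hedged sketch, not SDK code: driving the SyncAuthorizedEntries flow.
package example

import (
	"context"

	entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1"
	"github.com/spiffe/spire-api-sdk/proto/spire/api/types"
)

// syncAuthorizedEntries follows steps (1)-(6) above. Revision caching is
// omitted; a real caller would compare RevisionNumber against local state.
func syncAuthorizedEntries(ctx context.Context, client entryv1.EntryClient, mask *types.EntryMask) ([]*types.Entry, error) {
	stream, err := client.SyncAuthorizedEntries(ctx)
	if err != nil {
		return nil, err
	}
	// (1)-(2): open the stream; the server decides between sparse and full pages.
	if err := stream.Send(&entryv1.SyncAuthorizedEntriesRequest{OutputMask: mask}); err != nil {
		return nil, err
	}

	var entries []*types.Entry
	var wanted []string
	for {
		resp, err := stream.Recv()
		if err != nil {
			return nil, err
		}
		// (3): sparse pages carry only entry revisions; collect the IDs to fetch.
		for _, rev := range resp.EntryRevisions {
			wanted = append(wanted, rev.Id)
		}
		// (5): full entries, either on the initial response or after (4).
		entries = append(entries, resp.Entries...)

		switch {
		case resp.More:
			continue // more pages in the current phase
		case len(wanted) > 0:
			// (4): request full details for the new/updated entries.
			if err := stream.Send(&entryv1.SyncAuthorizedEntriesRequest{Ids: wanted}); err != nil {
				return nil, err
			}
			wanted = nil
		default:
			// (6): everything synced; close our side of the stream.
			return entries, stream.CloseSend()
		}
	}
}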
+ rpc SyncAuthorizedEntries(stream SyncAuthorizedEntriesRequest) returns (stream SyncAuthorizedEntriesResponse); } message CountEntriesRequest { @@ -97,8 +116,8 @@ message GetEntryRequest { } message BatchCreateEntryRequest { - // The entries to be created. The entry ID field is output only, and will - // be ignored here. + // The entries to be created. If no entry ID is provided, one will be + // generated. repeated spire.api.types.Entry entries = 1; // An output mask indicating the entry fields set in the response. @@ -176,3 +195,40 @@ message GetAuthorizedEntriesResponse { // The authorized entries. repeated spire.api.types.Entry entries = 1; } + +message SyncAuthorizedEntriesRequest { + // An output mask indicating which fields are set in the response. + spire.api.types.EntryMask output_mask = 1; + + // IDs of the entries to fetch in full. Sent by the client in response to + // a sparse entry. + repeated string ids = 2; +} + +message SyncAuthorizedEntriesResponse { + // The revisions of the authorized entries. This field is set when the + // authorized entry list meets or exceeds the server-determined page size. + // Callers use it to determine which entries are new/updated that they then + // request on the stream. + // See SyncAuthorizedEntries for details. + repeated EntryRevision entry_revisions = 1; + + // The authorized entries. This field is set either 1) on the initial + // response if the number of authorized entries is less than the page size + // or 2) in response to the caller requesting the entries after determining + // they need to details based on entry revisions provided in a previous + // response. + // See SyncAuthorizedEntries for details. + repeated spire.api.types.Entry entries = 2; + + // Whether there are more entries to sync down in this response phase. + bool more = 3; +} + +message EntryRevision { + // The entry ID. + string id = 1; + + // The entry revision number. + int64 revision_number = 2; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry_grpc.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry_grpc.pb.go index d0f6b9d0f64..bc692fe7a6f 100644 --- a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry_grpc.pb.go +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry_grpc.pb.go @@ -47,6 +47,24 @@ type EntryClient interface { // The caller must present an active agent X509-SVID. See the Agent // AttestAgent/RenewAgent RPCs. GetAuthorizedEntries(ctx context.Context, in *GetAuthorizedEntriesRequest, opts ...grpc.CallOption) (*GetAuthorizedEntriesResponse, error) + // Syncs authorized entries down to the caller. The caller controls which + // entries the server sends down full details for. The flow is as follows: + // 1. Caller opens up sync stream + // 2. Server determines authorized entries for caller: + // - If there are less entries than a server-determined page size, go to (5). + // - Otherwise, go to (3). + // 3. Server pages entry revisions to the caller (contains the entry ID and + // revision number). The "more" flag set for all pages but the last so + // that the caller knows when the server is done. + // 4. Client determines which entries are new or updated (based on revision + // number) and asks for them by sending a request with the IDs. + // 5. Server pages down entries to the caller for each ID identified in (4) + // or every entry in (2) if the number of entries was less than the + // server-determined page size. 
The "more" flag set for all pages but + // the last so that the caller knows when the server is done. + // 6. Steps (4) and (5) are repeated until the caller has synced down the + // details for all new/updated entries and closes the stream. + SyncAuthorizedEntries(ctx context.Context, opts ...grpc.CallOption) (Entry_SyncAuthorizedEntriesClient, error) } type entryClient struct { @@ -120,6 +138,37 @@ func (c *entryClient) GetAuthorizedEntries(ctx context.Context, in *GetAuthorize return out, nil } +func (c *entryClient) SyncAuthorizedEntries(ctx context.Context, opts ...grpc.CallOption) (Entry_SyncAuthorizedEntriesClient, error) { + stream, err := c.cc.NewStream(ctx, &_Entry_serviceDesc.Streams[0], "/spire.api.server.entry.v1.Entry/SyncAuthorizedEntries", opts...) + if err != nil { + return nil, err + } + x := &entrySyncAuthorizedEntriesClient{stream} + return x, nil +} + +type Entry_SyncAuthorizedEntriesClient interface { + Send(*SyncAuthorizedEntriesRequest) error + Recv() (*SyncAuthorizedEntriesResponse, error) + grpc.ClientStream +} + +type entrySyncAuthorizedEntriesClient struct { + grpc.ClientStream +} + +func (x *entrySyncAuthorizedEntriesClient) Send(m *SyncAuthorizedEntriesRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *entrySyncAuthorizedEntriesClient) Recv() (*SyncAuthorizedEntriesResponse, error) { + m := new(SyncAuthorizedEntriesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // EntryServer is the server API for Entry service. // All implementations must embed UnimplementedEntryServer // for forward compatibility @@ -153,6 +202,24 @@ type EntryServer interface { // The caller must present an active agent X509-SVID. See the Agent // AttestAgent/RenewAgent RPCs. GetAuthorizedEntries(context.Context, *GetAuthorizedEntriesRequest) (*GetAuthorizedEntriesResponse, error) + // Syncs authorized entries down to the caller. The caller controls which + // entries the server sends down full details for. The flow is as follows: + // 1. Caller opens up sync stream + // 2. Server determines authorized entries for caller: + // - If there are less entries than a server-determined page size, go to (5). + // - Otherwise, go to (3). + // 3. Server pages entry revisions to the caller (contains the entry ID and + // revision number). The "more" flag set for all pages but the last so + // that the caller knows when the server is done. + // 4. Client determines which entries are new or updated (based on revision + // number) and asks for them by sending a request with the IDs. + // 5. Server pages down entries to the caller for each ID identified in (4) + // or every entry in (2) if the number of entries was less than the + // server-determined page size. The "more" flag set for all pages but + // the last so that the caller knows when the server is done. + // 6. Steps (4) and (5) are repeated until the caller has synced down the + // details for all new/updated entries and closes the stream. 
+ SyncAuthorizedEntries(Entry_SyncAuthorizedEntriesServer) error mustEmbedUnimplementedEntryServer() } @@ -181,6 +248,9 @@ func (UnimplementedEntryServer) BatchDeleteEntry(context.Context, *BatchDeleteEn func (UnimplementedEntryServer) GetAuthorizedEntries(context.Context, *GetAuthorizedEntriesRequest) (*GetAuthorizedEntriesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetAuthorizedEntries not implemented") } +func (UnimplementedEntryServer) SyncAuthorizedEntries(Entry_SyncAuthorizedEntriesServer) error { + return status.Errorf(codes.Unimplemented, "method SyncAuthorizedEntries not implemented") +} func (UnimplementedEntryServer) mustEmbedUnimplementedEntryServer() {} // UnsafeEntryServer may be embedded to opt out of forward compatibility for this service. @@ -320,6 +390,32 @@ func _Entry_GetAuthorizedEntries_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +func _Entry_SyncAuthorizedEntries_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(EntryServer).SyncAuthorizedEntries(&entrySyncAuthorizedEntriesServer{stream}) +} + +type Entry_SyncAuthorizedEntriesServer interface { + Send(*SyncAuthorizedEntriesResponse) error + Recv() (*SyncAuthorizedEntriesRequest, error) + grpc.ServerStream +} + +type entrySyncAuthorizedEntriesServer struct { + grpc.ServerStream +} + +func (x *entrySyncAuthorizedEntriesServer) Send(m *SyncAuthorizedEntriesResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *entrySyncAuthorizedEntriesServer) Recv() (*SyncAuthorizedEntriesRequest, error) { + m := new(SyncAuthorizedEntriesRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + var _Entry_serviceDesc = grpc.ServiceDesc{ ServiceName: "spire.api.server.entry.v1.Entry", HandlerType: (*EntryServer)(nil), @@ -353,6 +449,13 @@ var _Entry_serviceDesc = grpc.ServiceDesc{ Handler: _Entry_GetAuthorizedEntries_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SyncAuthorizedEntries", + Handler: _Entry_SyncAuthorizedEntries_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, Metadata: "spire/api/server/entry/v1/entry.proto", } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
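Editorial note, not part of the patch: the hunks above extend the generated Entry service with a bidirectional SyncAuthorizedEntries stream, and it may help to see how a server would satisfy the new EntryServer contract. The sketch below is illustrative only; the entryv1 import path, the RegisterEntryServer helper, and the empty response payload are assumptions based on standard protoc-gen-go-grpc output and the interfaces shown in the hunk above, not something this diff adds.

// Minimal sketch (assumptions noted above): a server that answers each
// SyncAuthorizedEntries request until the client closes its send side.
package main

import (
	"io"
	"log"
	"net"

	"google.golang.org/grpc"

	entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1"
)

type entryServer struct {
	// Embedding the Unimplemented type keeps forward compatibility and
	// stubs out every method this sketch does not implement.
	entryv1.UnimplementedEntryServer
}

func (s *entryServer) SyncAuthorizedEntries(stream entryv1.Entry_SyncAuthorizedEntriesServer) error {
	for {
		// Recv blocks for the next request; io.EOF means the client is done.
		if _, err := stream.Recv(); err == io.EOF {
			return nil
		} else if err != nil {
			return err
		}
		// A real server would page through the caller's authorized entries;
		// an empty response keeps the sketch self-contained.
		if err := stream.Send(&entryv1.SyncAuthorizedEntriesResponse{}); err != nil {
			return err
		}
	}
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	entryv1.RegisterEntryServer(srv, &entryServer{})
	log.Fatal(srv.Serve(lis))
}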
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go new file mode 100644 index 00000000000..67f8d733999 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -0,0 +1,229 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +const ( + // ScopeName is the instrumentation scope name. + ScopeName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + // GRPCStatusCodeKey is convention for numeric status code of a gRPC request. + GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +// Filter is a predicate used to determine whether a given request in +// interceptor info should be traced. A Filter must return true if +// the request should be traced. +type Filter func(*InterceptorInfo) bool + +// config is a group of options for this instrumentation. +type config struct { + Filter Filter + Propagators propagation.TextMapPropagator + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + SpanStartOptions []trace.SpanStartOption + + ReceivedEvent bool + SentEvent bool + + tracer trace.Tracer + meter metric.Meter + + rpcDuration metric.Float64Histogram + rpcRequestSize metric.Int64Histogram + rpcResponseSize metric.Int64Histogram + rpcRequestsPerRPC metric.Int64Histogram + rpcResponsesPerRPC metric.Int64Histogram +} + +// Option applies an option value for a config. +type Option interface { + apply(*config) +} + +// newConfig returns a config configured with all the passed Options. 
+func newConfig(opts []Option, role string) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), + TracerProvider: otel.GetTracerProvider(), + MeterProvider: otel.GetMeterProvider(), + } + for _, o := range opts { + o.apply(c) + } + + c.tracer = c.TracerProvider.Tracer( + ScopeName, + trace.WithInstrumentationVersion(SemVersion()), + ) + + c.meter = c.MeterProvider.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + var err error + c.rpcDuration, err = c.meter.Float64Histogram("rpc."+role+".duration", + metric.WithDescription("Measures the duration of inbound RPC."), + metric.WithUnit("ms")) + if err != nil { + otel.Handle(err) + } + + c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size", + metric.WithDescription("Measures size of RPC request messages (uncompressed)."), + metric.WithUnit("By")) + if err != nil { + otel.Handle(err) + } + + c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size", + metric.WithDescription("Measures size of RPC response messages (uncompressed)."), + metric.WithUnit("By")) + if err != nil { + otel.Handle(err) + } + + c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", + metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), + metric.WithUnit("{count}")) + if err != nil { + otel.Handle(err) + } + + c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", + metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), + metric.WithUnit("{count}")) + if err != nil { + otel.Handle(err) + } + + return c +} + +type propagatorsOption struct{ p propagation.TextMapPropagator } + +func (o propagatorsOption) apply(c *config) { + if o.p != nil { + c.Propagators = o.p + } +} + +// WithPropagators returns an Option to use the Propagators when extracting +// and injecting trace context from requests. +func WithPropagators(p propagation.TextMapPropagator) Option { + return propagatorsOption{p: p} +} + +type tracerProviderOption struct{ tp trace.TracerProvider } + +func (o tracerProviderOption) apply(c *config) { + if o.tp != nil { + c.TracerProvider = o.tp + } +} + +// WithInterceptorFilter returns an Option to use the request filter. +// +// Deprecated: Use stats handlers instead. +func WithInterceptorFilter(f Filter) Option { + return interceptorFilterOption{f: f} +} + +type interceptorFilterOption struct { + f Filter +} + +func (o interceptorFilterOption) apply(c *config) { + if o.f != nil { + c.Filter = o.f + } +} + +// WithTracerProvider returns an Option to use the TracerProvider when +// creating a Tracer. +func WithTracerProvider(tp trace.TracerProvider) Option { + return tracerProviderOption{tp: tp} +} + +type meterProviderOption struct{ mp metric.MeterProvider } + +func (o meterProviderOption) apply(c *config) { + if o.mp != nil { + c.MeterProvider = o.mp + } +} + +// WithMeterProvider returns an Option to use the MeterProvider when +// creating a Meter. If this option is not provide the global MeterProvider will be used. +func WithMeterProvider(mp metric.MeterProvider) Option { + return meterProviderOption{mp: mp} +} + +// Event type that can be recorded, see WithMessageEvents. +type Event int + +// Different types of events that can be recorded, see WithMessageEvents. 
+const ( + ReceivedEvents Event = iota + SentEvents +) + +type messageEventsProviderOption struct { + events []Event +} + +func (m messageEventsProviderOption) apply(c *config) { + for _, e := range m.events { + switch e { + case ReceivedEvents: + c.ReceivedEvent = true + case SentEvents: + c.SentEvent = true + } + } +} + +// WithMessageEvents configures the Handler to record the specified events +// (span.AddEvent) on spans. By default only summary attributes are added at the +// end of the request. +// +// Valid events are: +// - ReceivedEvents: Record the number of bytes read after every gRPC read operation. +// - SentEvents: Record the number of bytes written after every gRPC write operation. +func WithMessageEvents(events ...Event) Option { + return messageEventsProviderOption{events: events} +} + +type spanStartOption struct{ opts []trace.SpanStartOption } + +func (o spanStartOption) apply(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, o.opts...) +} + +// WithSpanOptions configures an additional set of +// trace.SpanOptions, which are applied to each new span. +func WithSpanOptions(opts ...trace.SpanStartOption) Option { + return spanStartOption{opts} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go new file mode 100644 index 00000000000..958dcd87a4c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package otelgrpc is the instrumentation library for [google.golang.org/grpc]. + +Use [NewClientHandler] with [grpc.WithStatsHandler] to instrument a gRPC client. + +Use [NewServerHandler] with [grpc.StatsHandler] to instrument a gRPC server. +*/ +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go new file mode 100644 index 00000000000..3b487a93623 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -0,0 +1,540 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
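Editorial note, not part of the vendored tree: doc.go above steers users toward NewClientHandler and NewServerHandler rather than the deprecated interceptors defined in interceptor.go below. A minimal wiring sketch follows; it only uses options that appear in config.go (WithMessageEvents, ReceivedEvents, SentEvents) plus standard grpc dial/server options, and the insecure transport credentials are an assumption made for brevity.

// Sketch: instrumenting a gRPC client and server with the stats handlers.
package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
)

func newInstrumentedClient(target string) (*grpc.ClientConn, error) {
	// NewClientHandler traces and measures every RPC on the connection;
	// WithMessageEvents additionally records per-message span events.
	return grpc.Dial(
		target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler(
			otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents),
		)),
	)
}

func newInstrumentedServer() *grpc.Server {
	// The server-side handler starts a span per RPC and records the
	// rpc.server.* histograms registered in newConfig.
	return grpc.NewServer(
		grpc.StatsHandler(otelgrpc.NewServerHandler()),
	)
}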
+ +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +// gRPC tracing middleware +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md +import ( + "context" + "io" + "net" + "strconv" + "time" + + "google.golang.org/grpc" + grpc_codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +type messageType attribute.KeyValue + +// Event adds an event of the messageType to the span associated with the +// passed context with a message id. +func (m messageType) Event(ctx context.Context, id int, _ interface{}) { + span := trace.SpanFromContext(ctx) + if !span.IsRecording() { + return + } + span.AddEvent("message", trace.WithAttributes( + attribute.KeyValue(m), + RPCMessageIDKey.Int(id), + )) +} + +var ( + messageSent = messageType(RPCMessageTypeSent) + messageReceived = messageType(RPCMessageTypeReceived) +) + +// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable +// for use in a grpc.Dial call. +// +// Deprecated: Use [NewClientHandler] instead. +func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { + cfg := newConfig(opts, "client") + tracer := cfg.TracerProvider.Tracer( + ScopeName, + trace.WithInstrumentationVersion(Version()), + ) + + return func( + ctx context.Context, + method string, + req, reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + callOpts ...grpc.CallOption, + ) error { + i := &InterceptorInfo{ + Method: method, + Type: UnaryClient, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return invoker(ctx, method, req, reply, cc, callOpts...) + } + + name, attr, _ := telemetryAttributes(method, cc.Target()) + + startOpts := append([]trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(attr...), + }, + cfg.SpanStartOptions..., + ) + + ctx, span := tracer.Start( + ctx, + name, + startOpts..., + ) + defer span.End() + + ctx = inject(ctx, cfg.Propagators) + + if cfg.SentEvent { + messageSent.Event(ctx, 1, req) + } + + err := invoker(ctx, method, req, reply, cc, callOpts...) + + if cfg.ReceivedEvent { + messageReceived.Event(ctx, 1, reply) + } + + if err != nil { + s, _ := status.FromError(err) + span.SetStatus(codes.Error, s.Message()) + span.SetAttributes(statusCodeAttr(s.Code())) + } else { + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + } + + return err + } +} + +// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and +// SendMsg method call. 
+type clientStream struct { + grpc.ClientStream + desc *grpc.StreamDesc + + span trace.Span + + receivedEvent bool + sentEvent bool + + receivedMessageID int + sentMessageID int +} + +var _ = proto.Marshal + +func (w *clientStream) RecvMsg(m interface{}) error { + err := w.ClientStream.RecvMsg(m) + + if err == nil && !w.desc.ServerStreams { + w.endSpan(nil) + } else if err == io.EOF { + w.endSpan(nil) + } else if err != nil { + w.endSpan(err) + } else { + w.receivedMessageID++ + + if w.receivedEvent { + messageReceived.Event(w.Context(), w.receivedMessageID, m) + } + } + + return err +} + +func (w *clientStream) SendMsg(m interface{}) error { + err := w.ClientStream.SendMsg(m) + + w.sentMessageID++ + + if w.sentEvent { + messageSent.Event(w.Context(), w.sentMessageID, m) + } + + if err != nil { + w.endSpan(err) + } + + return err +} + +func (w *clientStream) Header() (metadata.MD, error) { + md, err := w.ClientStream.Header() + if err != nil { + w.endSpan(err) + } + + return md, err +} + +func (w *clientStream) CloseSend() error { + err := w.ClientStream.CloseSend() + if err != nil { + w.endSpan(err) + } + + return err +} + +func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { + return &clientStream{ + ClientStream: s, + span: span, + desc: desc, + receivedEvent: cfg.ReceivedEvent, + sentEvent: cfg.SentEvent, + } +} + +func (w *clientStream) endSpan(err error) { + if err != nil { + s, _ := status.FromError(err) + w.span.SetStatus(codes.Error, s.Message()) + w.span.SetAttributes(statusCodeAttr(s.Code())) + } else { + w.span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + } + + w.span.End() +} + +// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable +// for use in a grpc.Dial call. +// +// Deprecated: Use [NewClientHandler] instead. +func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + cfg := newConfig(opts, "client") + tracer := cfg.TracerProvider.Tracer( + ScopeName, + trace.WithInstrumentationVersion(Version()), + ) + + return func( + ctx context.Context, + desc *grpc.StreamDesc, + cc *grpc.ClientConn, + method string, + streamer grpc.Streamer, + callOpts ...grpc.CallOption, + ) (grpc.ClientStream, error) { + i := &InterceptorInfo{ + Method: method, + Type: StreamClient, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return streamer(ctx, desc, cc, method, callOpts...) + } + + name, attr, _ := telemetryAttributes(method, cc.Target()) + + startOpts := append([]trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(attr...), + }, + cfg.SpanStartOptions..., + ) + + ctx, span := tracer.Start( + ctx, + name, + startOpts..., + ) + + ctx = inject(ctx, cfg.Propagators) + + s, err := streamer(ctx, desc, cc, method, callOpts...) + if err != nil { + grpcStatus, _ := status.FromError(err) + span.SetStatus(codes.Error, grpcStatus.Message()) + span.SetAttributes(statusCodeAttr(grpcStatus.Code())) + span.End() + return s, err + } + stream := wrapClientStream(ctx, s, desc, span, cfg) + return stream, nil + } +} + +// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable +// for use in a grpc.NewServer call. +// +// Deprecated: Use [NewServerHandler] instead. 
+func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { + cfg := newConfig(opts, "server") + tracer := cfg.TracerProvider.Tracer( + ScopeName, + trace.WithInstrumentationVersion(Version()), + ) + + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (interface{}, error) { + i := &InterceptorInfo{ + UnaryServerInfo: info, + Type: UnaryServer, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return handler(ctx, req) + } + + ctx = extract(ctx, cfg.Propagators) + name, attr, metricAttrs := telemetryAttributes(info.FullMethod, peerFromCtx(ctx)) + + startOpts := append([]trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindServer), + trace.WithAttributes(attr...), + }, + cfg.SpanStartOptions..., + ) + + ctx, span := tracer.Start( + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, + startOpts..., + ) + defer span.End() + + if cfg.ReceivedEvent { + messageReceived.Event(ctx, 1, req) + } + + before := time.Now() + + resp, err := handler(ctx, req) + + s, _ := status.FromError(err) + if err != nil { + statusCode, msg := serverStatus(s) + span.SetStatus(statusCode, msg) + if cfg.SentEvent { + messageSent.Event(ctx, 1, s.Proto()) + } + } else { + if cfg.SentEvent { + messageSent.Event(ctx, 1, resp) + } + } + grpcStatusCodeAttr := statusCodeAttr(s.Code()) + span.SetAttributes(grpcStatusCodeAttr) + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(before)) / float64(time.Millisecond) + + metricAttrs = append(metricAttrs, grpcStatusCodeAttr) + cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + + return resp, err + } +} + +// serverStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and +// SendMsg method call. +type serverStream struct { + grpc.ServerStream + ctx context.Context + + receivedMessageID int + sentMessageID int + + receivedEvent bool + sentEvent bool +} + +func (w *serverStream) Context() context.Context { + return w.ctx +} + +func (w *serverStream) RecvMsg(m interface{}) error { + err := w.ServerStream.RecvMsg(m) + + if err == nil { + w.receivedMessageID++ + if w.receivedEvent { + messageReceived.Event(w.Context(), w.receivedMessageID, m) + } + } + + return err +} + +func (w *serverStream) SendMsg(m interface{}) error { + err := w.ServerStream.SendMsg(m) + + w.sentMessageID++ + if w.sentEvent { + messageSent.Event(w.Context(), w.sentMessageID, m) + } + + return err +} + +func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *serverStream { + return &serverStream{ + ServerStream: ss, + ctx: ctx, + receivedEvent: cfg.ReceivedEvent, + sentEvent: cfg.SentEvent, + } +} + +// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable +// for use in a grpc.NewServer call. +// +// Deprecated: Use [NewServerHandler] instead. 
+func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + cfg := newConfig(opts, "server") + tracer := cfg.TracerProvider.Tracer( + ScopeName, + trace.WithInstrumentationVersion(Version()), + ) + + return func( + srv interface{}, + ss grpc.ServerStream, + info *grpc.StreamServerInfo, + handler grpc.StreamHandler, + ) error { + ctx := ss.Context() + i := &InterceptorInfo{ + StreamServerInfo: info, + Type: StreamServer, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return handler(srv, wrapServerStream(ctx, ss, cfg)) + } + + ctx = extract(ctx, cfg.Propagators) + name, attr, _ := telemetryAttributes(info.FullMethod, peerFromCtx(ctx)) + + startOpts := append([]trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindServer), + trace.WithAttributes(attr...), + }, + cfg.SpanStartOptions..., + ) + + ctx, span := tracer.Start( + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, + startOpts..., + ) + defer span.End() + + err := handler(srv, wrapServerStream(ctx, ss, cfg)) + if err != nil { + s, _ := status.FromError(err) + statusCode, msg := serverStatus(s) + span.SetStatus(statusCode, msg) + span.SetAttributes(statusCodeAttr(s.Code())) + } else { + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + } + + return err + } +} + +// telemetryAttributes returns a span name and span and metric attributes from +// the gRPC method and peer address. +func telemetryAttributes(fullMethod, peerAddress string) (string, []attribute.KeyValue, []attribute.KeyValue) { + name, methodAttrs := internal.ParseFullMethod(fullMethod) + peerAttrs := peerAttr(peerAddress) + + attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(peerAttrs)) + attrs = append(attrs, RPCSystemGRPC) + attrs = append(attrs, methodAttrs...) + metricAttrs := attrs[:1+len(methodAttrs)] + attrs = append(attrs, peerAttrs...) + return name, attrs, metricAttrs +} + +// peerAttr returns attributes about the peer address. +func peerAttr(addr string) []attribute.KeyValue { + host, p, err := net.SplitHostPort(addr) + if err != nil { + return nil + } + + if host == "" { + host = "127.0.0.1" + } + port, err := strconv.Atoi(p) + if err != nil { + return nil + } + + var attr []attribute.KeyValue + if ip := net.ParseIP(host); ip != nil { + attr = []attribute.KeyValue{ + semconv.NetSockPeerAddr(host), + semconv.NetSockPeerPort(port), + } + } else { + attr = []attribute.KeyValue{ + semconv.NetPeerName(host), + semconv.NetPeerPort(port), + } + } + + return attr +} + +// peerFromCtx returns a peer address from a context, if one exists. +func peerFromCtx(ctx context.Context) string { + p, ok := peer.FromContext(ctx) + if !ok { + return "" + } + return p.Addr.String() +} + +// statusCodeAttr returns status code attribute based on given gRPC code. +func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue { + return GRPCStatusCodeKey.Int64(int64(c)) +} + +// serverStatus returns a span status code and message for a given gRPC +// status code. It maps specific gRPC status codes to a corresponding span +// status code and message. This function is intended for use on the server +// side of a gRPC connection. +// +// If the gRPC status code is Unknown, DeadlineExceeded, Unimplemented, +// Internal, Unavailable, or DataLoss, it returns a span status code of Error +// and the message from the gRPC status. Otherwise, it returns a span status +// code of Unset and an empty message. 
+func serverStatus(grpcStatus *status.Status) (codes.Code, string) { + switch grpcStatus.Code() { + case grpc_codes.Unknown, + grpc_codes.DeadlineExceeded, + grpc_codes.Unimplemented, + grpc_codes.Internal, + grpc_codes.Unavailable, + grpc_codes.DataLoss: + return codes.Error, grpcStatus.Message() + default: + return codes.Unset, "" + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go new file mode 100644 index 00000000000..f6116946bfd --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "google.golang.org/grpc" +) + +// InterceptorType is the flag to define which gRPC interceptor +// the InterceptorInfo object is. +type InterceptorType uint8 + +const ( + // UndefinedInterceptor is the type for the interceptor information that is not + // well initialized or categorized to other types. + UndefinedInterceptor InterceptorType = iota + // UnaryClient is the type for grpc.UnaryClient interceptor. + UnaryClient + // StreamClient is the type for grpc.StreamClient interceptor. + StreamClient + // UnaryServer is the type for grpc.UnaryServer interceptor. + UnaryServer + // StreamServer is the type for grpc.StreamServer interceptor. + StreamServer +) + +// InterceptorInfo is the union of some arguments to four types of +// gRPC interceptors. +type InterceptorInfo struct { + // Method is method name registered to UnaryClient and StreamClient + Method string + // UnaryServerInfo is the metadata for UnaryServer + UnaryServerInfo *grpc.UnaryServerInfo + // StreamServerInfo if the metadata for StreamServer + StreamServerInfo *grpc.StreamServerInfo + // Type is the type for interceptor + Type InterceptorType +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go new file mode 100644 index 00000000000..cf32a9e978c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" + +import ( + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +// ParseFullMethod returns a span name following the OpenTelemetry semantic +// conventions as well as all applicable span attribute.KeyValue attributes based +// on a gRPC's FullMethod. +// +// Parsing is consistent with grpc-go implementation: +// https://github.com/grpc/grpc-go/blob/v1.57.0/internal/grpcutil/method.go#L26-L39 +func ParseFullMethod(fullMethod string) (string, []attribute.KeyValue) { + if !strings.HasPrefix(fullMethod, "/") { + // Invalid format, does not follow `/package.service/method`. + return fullMethod, nil + } + name := fullMethod[1:] + pos := strings.LastIndex(name, "/") + if pos < 0 { + // Invalid format, does not follow `/package.service/method`. + return name, nil + } + service, method := name[:pos], name[pos+1:] + + var attrs []attribute.KeyValue + if service != "" { + attrs = append(attrs, semconv.RPCService(service)) + } + if method != "" { + attrs = append(attrs, semconv.RPCMethod(method)) + } + return name, attrs +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go new file mode 100644 index 00000000000..f585fb6ae0c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go @@ -0,0 +1,98 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "context" + + "google.golang.org/grpc/metadata" + + "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type metadataSupplier struct { + metadata *metadata.MD +} + +// assert that metadataSupplier implements the TextMapCarrier interface. +var _ propagation.TextMapCarrier = &metadataSupplier{} + +func (s *metadataSupplier) Get(key string) string { + values := s.metadata.Get(key) + if len(values) == 0 { + return "" + } + return values[0] +} + +func (s *metadataSupplier) Set(key string, value string) { + s.metadata.Set(key, value) +} + +func (s *metadataSupplier) Keys() []string { + out := make([]string, 0, len(*s.metadata)) + for key := range *s.metadata { + out = append(out, key) + } + return out +} + +// Inject injects correlation context and span context into the gRPC +// metadata object. This function is meant to be used on outgoing +// requests. +// Deprecated: Unnecessary public func. 
+func Inject(ctx context.Context, md *metadata.MD, opts ...Option) { + c := newConfig(opts, "") + c.Propagators.Inject(ctx, &metadataSupplier{ + metadata: md, + }) +} + +func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + md = metadata.MD{} + } + propagators.Inject(ctx, &metadataSupplier{ + metadata: &md, + }) + return metadata.NewOutgoingContext(ctx, md) +} + +// Extract returns the correlation context and span context that +// another service encoded in the gRPC metadata object with Inject. +// This function is meant to be used on incoming requests. +// Deprecated: Unnecessary public func. +func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { + c := newConfig(opts, "") + ctx = c.Propagators.Extract(ctx, &metadataSupplier{ + metadata: md, + }) + + return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) +} + +func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + md = metadata.MD{} + } + + return propagators.Extract(ctx, &metadataSupplier{ + metadata: &md, + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go new file mode 100644 index 00000000000..b65fab308f3 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +// Semantic conventions for attribute keys for gRPC. +const ( + // Name of message transmitted or received. + RPCNameKey = attribute.Key("name") + + // Type of message transmitted or received. + RPCMessageTypeKey = attribute.Key("message.type") + + // Identifier of message transmitted or received. + RPCMessageIDKey = attribute.Key("message.id") + + // The compressed size of the message transmitted or received in bytes. + RPCMessageCompressedSizeKey = attribute.Key("message.compressed_size") + + // The uncompressed size of the message transmitted or received in + // bytes. + RPCMessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +// Semantic conventions for common RPC attributes. +var ( + // Semantic convention for gRPC as the remoting system. + RPCSystemGRPC = semconv.RPCSystemGRPC + + // Semantic convention for a message named message. + RPCNameMessage = RPCNameKey.String("message") + + // Semantic conventions for RPC message types. 
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go new file mode 100644 index 00000000000..e41e6df6186 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -0,0 +1,238 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "context" + "sync/atomic" + "time" + + grpc_codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +type gRPCContextKey struct{} + +type gRPCContext struct { + messagesReceived int64 + messagesSent int64 + metricAttrs []attribute.KeyValue +} + +type serverHandler struct { + *config +} + +// NewServerHandler creates a stats.Handler for gRPC server. +func NewServerHandler(opts ...Option) stats.Handler { + h := &serverHandler{ + config: newConfig(opts, "server"), + } + + return h +} + +// TagConn can attach some information to the given context. +func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { + span := trace.SpanFromContext(ctx) + attrs := peerAttr(peerFromCtx(ctx)) + span.SetAttributes(attrs...) + return ctx +} + +// HandleConn processes the Conn stats. +func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) { +} + +// TagRPC can attach some information to the given context. +func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + ctx = extract(ctx, h.config.Propagators) + + name, attrs := internal.ParseFullMethod(info.FullMethodName) + attrs = append(attrs, RPCSystemGRPC) + ctx, _ = h.tracer.Start( + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithAttributes(attrs...), + ) + + gctx := gRPCContext{ + metricAttrs: attrs, + } + return context.WithValue(ctx, gRPCContextKey{}, &gctx) +} + +// HandleRPC processes the RPC stats. +func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + h.handleRPC(ctx, rs) +} + +type clientHandler struct { + *config +} + +// NewClientHandler creates a stats.Handler for gRPC client. +func NewClientHandler(opts ...Option) stats.Handler { + h := &clientHandler{ + config: newConfig(opts, "client"), + } + + return h +} + +// TagRPC can attach some information to the given context. 
+func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + name, attrs := internal.ParseFullMethod(info.FullMethodName) + attrs = append(attrs, RPCSystemGRPC) + ctx, _ = h.tracer.Start( + ctx, + name, + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(attrs...), + ) + + gctx := gRPCContext{ + metricAttrs: attrs, + } + + return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators) +} + +// HandleRPC processes the RPC stats. +func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + h.handleRPC(ctx, rs) +} + +// TagConn can attach some information to the given context. +func (h *clientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { + span := trace.SpanFromContext(ctx) + attrs := peerAttr(cti.RemoteAddr.String()) + span.SetAttributes(attrs...) + return ctx +} + +// HandleConn processes the Conn stats. +func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) { + // no-op +} + +func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) { + span := trace.SpanFromContext(ctx) + gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) + var messageId int64 + metricAttrs := make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) + metricAttrs = append(metricAttrs, gctx.metricAttrs...) + wctx := withoutCancel(ctx) + + switch rs := rs.(type) { + case *stats.Begin: + case *stats.InPayload: + if gctx != nil { + messageId = atomic.AddInt64(&gctx.messagesReceived, 1) + c.rpcRequestSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + } + + if c.ReceivedEvent { + span.AddEvent("message", + trace.WithAttributes( + semconv.MessageTypeReceived, + semconv.MessageIDKey.Int64(messageId), + semconv.MessageCompressedSizeKey.Int(rs.CompressedLength), + semconv.MessageUncompressedSizeKey.Int(rs.Length), + ), + ) + } + case *stats.OutPayload: + if gctx != nil { + messageId = atomic.AddInt64(&gctx.messagesSent, 1) + c.rpcResponseSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + } + + if c.SentEvent { + span.AddEvent("message", + trace.WithAttributes( + semconv.MessageTypeSent, + semconv.MessageIDKey.Int64(messageId), + semconv.MessageCompressedSizeKey.Int(rs.CompressedLength), + semconv.MessageUncompressedSizeKey.Int(rs.Length), + ), + ) + } + case *stats.OutTrailer: + case *stats.End: + var rpcStatusAttr attribute.KeyValue + + if rs.Error != nil { + s, _ := status.FromError(rs.Error) + span.SetStatus(codes.Error, s.Message()) + rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code())) + } else { + rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK)) + } + span.SetAttributes(rpcStatusAttr) + span.End() + + metricAttrs = append(metricAttrs, rpcStatusAttr) + + // Use floating point division here for higher precision (instead of Millisecond method). 
+ elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) + + c.rpcDuration.Record(wctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + c.rpcRequestsPerRPC.Record(wctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) + c.rpcResponsesPerRPC.Record(wctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) + default: + return + } +} + +func withoutCancel(parent context.Context) context.Context { + if parent == nil { + panic("cannot create context from nil parent") + } + return withoutCancelCtx{parent} +} + +type withoutCancelCtx struct { + c context.Context +} + +func (withoutCancelCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (withoutCancelCtx) Done() <-chan struct{} { + return nil +} + +func (withoutCancelCtx) Err() error { + return nil +} + +func (w withoutCancelCtx) Value(key any) any { + return w.c.Value(key) +} + +func (w withoutCancelCtx) String() string { + return "withoutCancel" +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go new file mode 100644 index 00000000000..f47c8a67514 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +// Version is the current release version of the gRPC instrumentation. +func Version() string { + return "0.46.1" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +// +// Deprecated: Use [Version] instead. +func SemVersion() string { + return Version() +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go new file mode 100644 index 00000000000..92b8cf73c97 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" +) + +// DefaultClient is the default Client and is used by Get, Head, Post and PostForm. +// Please be careful of intitialization order - for example, if you change +// the global propagator, the DefaultClient might still be using the old one. +var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} + +// Get is a convenient replacement for http.Get that adds a span around the request. +func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Head is a convenient replacement for http.Head that adds a span around the request. +func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Post is a convenient replacement for http.Post that adds a span around the request. +func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", contentType) + return DefaultClient.Do(req) +} + +// PostForm is a convenient replacement for http.PostForm that adds a span around the request. +func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) { + return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go new file mode 100644 index 00000000000..9509014e87c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Attribute keys that can be added to a span. +const ( + ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read + ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded) + WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written + WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) +) + +// Server HTTP metrics. +const ( + RequestCount = "http.server.request_count" // Incoming request count total + RequestContentLength = "http.server.request_content_length" // Incoming request bytes total + ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total + ServerLatency = "http.server.duration" // Incoming end to end duration, milliseconds +) + +// Filter is a predicate used to determine whether a given http.request should +// be traced. A Filter must return true if the request should be traced. +type Filter func(*http.Request) bool + +func newTracer(tp trace.TracerProvider) trace.Tracer { + return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go new file mode 100644 index 00000000000..a1b5b5e5aa8 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "net/http" + "net/http/httptrace" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +// ScopeName is the instrumentation scope name. +const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +// config represents the configuration options available for the http.Handler +// and http.Transport types. 
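// A minimal usage sketch (not part of the vendored file) for the Filter type
// defined in common.go above; the "/healthz" path and the mux variable are
// assumptions made for illustration:
//
//	var skipHealth otelhttp.Filter = func(r *http.Request) bool {
//		return r.URL.Path != "/healthz" // trace everything except health checks
//	}
//	handler := otelhttp.NewHandler(mux, "server", otelhttp.WithFilter(skipHealth))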
+type config struct { + ServerName string + Tracer trace.Tracer + Meter metric.Meter + Propagators propagation.TextMapPropagator + SpanStartOptions []trace.SpanStartOption + PublicEndpoint bool + PublicEndpointFn func(*http.Request) bool + ReadEvent bool + WriteEvent bool + Filters []Filter + SpanNameFormatter func(string, *http.Request) string + ClientTrace func(context.Context) *httptrace.ClientTrace + + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider +} + +// Option interface used for setting optional config properties. +type Option interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +// newConfig creates a new config struct and applies opts to it. +func newConfig(opts ...Option) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), + MeterProvider: otel.GetMeterProvider(), + } + for _, opt := range opts { + opt.apply(c) + } + + // Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context. + if c.TracerProvider != nil { + c.Tracer = newTracer(c.TracerProvider) + } + + c.Meter = c.MeterProvider.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version()), + ) + + return c +} + +// WithTracerProvider specifies a tracer provider to use for creating a tracer. +// If none is specified, the global provider is used. +func WithTracerProvider(provider trace.TracerProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.TracerProvider = provider + } + }) +} + +// WithMeterProvider specifies a meter provider to use for creating a meter. +// If none is specified, the global provider is used. +func WithMeterProvider(provider metric.MeterProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.MeterProvider = provider + } + }) +} + +// WithPublicEndpoint configures the Handler to link the span with an incoming +// span context. If this option is not provided, then the association is a child +// association instead of a link. +func WithPublicEndpoint() Option { + return optionFunc(func(c *config) { + c.PublicEndpoint = true + }) +} + +// WithPublicEndpointFn runs with every request, and allows conditionnally +// configuring the Handler to link the span with an incoming span context. If +// this option is not provided or returns false, then the association is a +// child association instead of a link. +// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn. +func WithPublicEndpointFn(fn func(*http.Request) bool) Option { + return optionFunc(func(c *config) { + c.PublicEndpointFn = fn + }) +} + +// WithPropagators configures specific propagators. If this +// option isn't specified, then the global TextMapPropagator is used. +func WithPropagators(ps propagation.TextMapPropagator) Option { + return optionFunc(func(c *config) { + if ps != nil { + c.Propagators = ps + } + }) +} + +// WithSpanOptions configures an additional set of +// trace.SpanOptions, which are applied to each new span. +func WithSpanOptions(opts ...trace.SpanStartOption) Option { + return optionFunc(func(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, opts...) + }) +} + +// WithFilter adds a filter to the list of filters used by the handler. +// If any filter indicates to exclude a request then the request will not be +// traced. All filters must allow a request to be traced for a Span to be created. +// If no filters are provided then all requests are traced. 
+// Filters will be invoked for each processed request, it is advised to make them +// simple and fast. +func WithFilter(f Filter) Option { + return optionFunc(func(c *config) { + c.Filters = append(c.Filters, f) + }) +} + +type event int + +// Different types of events that can be recorded, see WithMessageEvents. +const ( + ReadEvents event = iota + WriteEvents +) + +// WithMessageEvents configures the Handler to record the specified events +// (span.AddEvent) on spans. By default only summary attributes are added at the +// end of the request. +// +// Valid events are: +// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read +// using the ReadBytesKey +// - WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write +// using the WriteBytesKey +func WithMessageEvents(events ...event) Option { + return optionFunc(func(c *config) { + for _, e := range events { + switch e { + case ReadEvents: + c.ReadEvent = true + case WriteEvents: + c.WriteEvent = true + } + } + }) +} + +// WithSpanNameFormatter takes a function that will be called on every +// request and the returned string will become the Span Name. +func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option { + return optionFunc(func(c *config) { + c.SpanNameFormatter = f + }) +} + +// WithClientTrace takes a function that returns client trace instance that will be +// applied to the requests sent through the otelhttp Transport. +func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option { + return optionFunc(func(c *config) { + c.ClientTrace = f + }) +} + +// WithServerName returns an Option that sets the name of the (virtual) server +// handling requests. +func WithServerName(server string) Option { + return optionFunc(func(c *config) { + c.ServerName = server + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go new file mode 100644 index 00000000000..38c7f01c71a --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package otelhttp provides an http.Handler and functions that are intended +// to be used to add tracing by wrapping existing handlers (with Handler) and +// routes WithRouteTag. 
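// An illustrative sketch (not part of the upstream file) combining several of
// the options defined above when wrapping an http.Handler; the server name and
// span-name format are assumptions:
//
//	handler := otelhttp.NewHandler(mux, "http.server",
//		otelhttp.WithServerName("api.internal"),
//		otelhttp.WithMessageEvents(otelhttp.ReadEvents, otelhttp.WriteEvents),
//		otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
//			return operation + " " + r.Method
//		}),
//	)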
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go new file mode 100644 index 00000000000..9a8260059d9 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -0,0 +1,287 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "io" + "net/http" + "time" + + "github.com/felixge/httpsnoop" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +// middleware is an http middleware which wraps the next handler in a span. +type middleware struct { + operation string + server string + + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + counters map[string]metric.Int64Counter + valueRecorders map[string]metric.Float64Histogram + publicEndpoint bool + publicEndpointFn func(*http.Request) bool +} + +func defaultHandlerFormatter(operation string, _ *http.Request) string { + return operation +} + +// NewHandler wraps the passed handler in a span named after the operation and +// enriches it with metrics. +func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler { + return NewMiddleware(operation, opts...)(handler) +} + +// NewMiddleware returns a tracing and metrics instrumentation middleware. +// The handler returned by the middleware wraps a handler +// in a span named after the operation and enriches it with metrics. +func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { + h := middleware{ + operation: operation, + } + + defaultOpts := []Option{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)), + WithSpanNameFormatter(defaultHandlerFormatter), + } + + c := newConfig(append(defaultOpts, opts...)...) 
+ h.configure(c) + h.createMeasures() + + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + h.serveHTTP(w, r, next) + }) + } +} + +func (h *middleware) configure(c *config) { + h.tracer = c.Tracer + h.meter = c.Meter + h.propagators = c.Propagators + h.spanStartOptions = c.SpanStartOptions + h.readEvent = c.ReadEvent + h.writeEvent = c.WriteEvent + h.filters = c.Filters + h.spanNameFormatter = c.SpanNameFormatter + h.publicEndpoint = c.PublicEndpoint + h.publicEndpointFn = c.PublicEndpointFn + h.server = c.ServerName +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} + +func (h *middleware) createMeasures() { + h.counters = make(map[string]metric.Int64Counter) + h.valueRecorders = make(map[string]metric.Float64Histogram) + + requestBytesCounter, err := h.meter.Int64Counter( + RequestContentLength, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request content length (uncompressed)"), + ) + handleErr(err) + + responseBytesCounter, err := h.meter.Int64Counter( + ResponseContentLength, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response content length (uncompressed)"), + ) + handleErr(err) + + serverLatencyMeasure, err := h.meter.Float64Histogram( + ServerLatency, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of HTTP request handling"), + ) + handleErr(err) + + h.counters[RequestContentLength] = requestBytesCounter + h.counters[ResponseContentLength] = responseBytesCounter + h.valueRecorders[ServerLatency] = serverLatencyMeasure +} + +// serveHTTP sets up tracing and calls the given next http.Handler with the span +// context injected into the request context. +func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { + requestStartTime := time.Now() + for _, f := range h.filters { + if !f(r) { + // Simply pass through to the handler if a filter rejects the request + next.ServeHTTP(w, r) + return + } + } + + ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) + opts := []trace.SpanStartOption{ + trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), + } + if h.server != "" { + hostAttr := semconv.NetHostName(h.server) + opts = append(opts, trace.WithAttributes(hostAttr)) + } + opts = append(opts, h.spanStartOptions...) + if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + opts = append(opts, trace.WithNewRoot()) + // Linking incoming span context if any for public endpoint. + if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { + opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s})) + } + } + + tracer := h.tracer + + if tracer == nil { + if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) + defer span.End() + + readRecordFunc := func(int64) {} + if h.readEvent { + readRecordFunc = func(n int64) { + span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n))) + } + } + + var bw bodyWrapper + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. 
+ if r.Body != nil && r.Body != http.NoBody { + bw.ReadCloser = r.Body + bw.record = readRecordFunc + r.Body = &bw + } + + writeRecordFunc := func(int64) {} + if h.writeEvent { + writeRecordFunc = func(n int64) { + span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n))) + } + } + + rww := &respWriterWrapper{ + ResponseWriter: w, + record: writeRecordFunc, + ctx: ctx, + props: h.propagators, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } + + // Wrap w to use our ResponseWriter methods while also exposing + // other interfaces that w may implement (http.CloseNotifier, + // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom). + + w = httpsnoop.Wrap(w, httpsnoop.Hooks{ + Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc { + return rww.Header + }, + Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { + return rww.Write + }, + WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { + return rww.WriteHeader + }, + }) + + labeler := &Labeler{} + ctx = injectLabeler(ctx, labeler) + + next.ServeHTTP(w, r.WithContext(ctx)) + + setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err) + + // Add metrics + attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) + if rww.statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) + } + o := metric.WithAttributes(attributes...) + h.counters[RequestContentLength].Add(ctx, bw.read, o) + h.counters[ResponseContentLength].Add(ctx, rww.written, o) + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + + h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, o) +} + +func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { + attributes := []attribute.KeyValue{} + + // TODO: Consider adding an event after each read and write, possibly as an + // option (defaulting to off), so as to not create needlessly verbose spans. + if read > 0 { + attributes = append(attributes, ReadBytesKey.Int64(read)) + } + if rerr != nil && rerr != io.EOF { + attributes = append(attributes, ReadErrorKey.String(rerr.Error())) + } + if wrote > 0 { + attributes = append(attributes, WroteBytesKey.Int64(wrote)) + } + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) + + if werr != nil && werr != io.EOF { + attributes = append(attributes, WriteErrorKey.String(werr.Error())) + } + span.SetAttributes(attributes...) +} + +// WithRouteTag annotates spans and metrics with the provided route name +// with HTTP route attribute. 
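// For example (a sketch, not part of the upstream file; the route pattern and
// usersHandler are assumptions):
//
//	mux.Handle("/users/", otelhttp.WithRouteTag("/users/:id", usersHandler))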
+func WithRouteTag(route string, h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + attr := semconv.HTTPRouteKey.String(route) + + span := trace.SpanFromContext(r.Context()) + span.SetAttributes(attr) + + labeler, _ := LabelerFromContext(r.Context()) + labeler.Add(attr) + + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go new file mode 100644 index 00000000000..edf4ce3d315 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +// Generate semconvutil package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go new file mode 100644 index 00000000000..d3dede9ebbd --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -0,0 +1,552 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconvutil/httpconv.go.tmpl + +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +import ( + "fmt" + "net/http" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +// HTTPClientResponse returns trace attributes for an HTTP response received by a +// client from a server. 
It will return the following attributes if the related +// values are defined in resp: "http.status.code", +// "http.response_content_length". +// +// This does not add all OpenTelemetry required attributes for an HTTP event, +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. If a complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...) +func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { + return hc.ClientResponse(resp) +} + +// HTTPClientRequest returns trace attributes for an HTTP request made by a client. +// The following attributes are always returned: "http.url", "http.flavor", +// "http.method", "net.peer.name". The following attributes are returned if the +// related values are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "enduser.id". +func HTTPClientRequest(req *http.Request) []attribute.KeyValue { + return hc.ClientRequest(req) +} + +// HTTPClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func HTTPClientStatus(code int) (codes.Code, string) { + return hc.ClientStatus(code) +} + +// HTTPServerRequest returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "http.target", "net.host.name". The following attributes are +// returned if they related values are defined in req: "net.host.port", +// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", +// "http.client_ip". +func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { + return hc.ServerRequest(server, req) +} + +// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. 
+// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "net.host.name". The following attributes are +// returned if they related values are defined in req: "net.host.port". +func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { + return hc.ServerRequestMetrics(server, req) +} + +// HTTPServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func HTTPServerStatus(code int) (codes.Code, string) { + return hc.ServerStatus(code) +} + +// HTTPRequestHeader returns the contents of h as attributes. +// +// Instrumentation should require an explicit configuration of which headers to +// captured and then prune what they pass here. Including all headers can be a +// security risk - explicit configuration helps avoid leaking sensitive +// information. +// +// The User-Agent header is already captured in the http.user_agent attribute +// from ClientRequest and ServerRequest. Instrumentation may provide an option +// to capture that header here even though it is not recommended. Otherwise, +// instrumentation should filter that out of what is passed. +func HTTPRequestHeader(h http.Header) []attribute.KeyValue { + return hc.RequestHeader(h) +} + +// HTTPResponseHeader returns the contents of h as attributes. +// +// Instrumentation should require an explicit configuration of which headers to +// captured and then prune what they pass here. Including all headers can be a +// security risk - explicit configuration helps avoid leaking sensitive +// information. +// +// The User-Agent header is already captured in the http.user_agent attribute +// from ClientRequest and ServerRequest. Instrumentation may provide an option +// to capture that header here even though it is not recommended. Otherwise, +// instrumentation should filter that out of what is passed. +func HTTPResponseHeader(h http.Header) []attribute.KeyValue { + return hc.ResponseHeader(h) +} + +// httpConv are the HTTP semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type httpConv struct { + NetConv *netConv + + EnduserIDKey attribute.Key + HTTPClientIPKey attribute.Key + HTTPFlavorKey attribute.Key + HTTPMethodKey attribute.Key + HTTPRequestContentLengthKey attribute.Key + HTTPResponseContentLengthKey attribute.Key + HTTPRouteKey attribute.Key + HTTPSchemeHTTP attribute.KeyValue + HTTPSchemeHTTPS attribute.KeyValue + HTTPStatusCodeKey attribute.Key + HTTPTargetKey attribute.Key + HTTPURLKey attribute.Key + HTTPUserAgentKey attribute.Key +} + +var hc = &httpConv{ + NetConv: nc, + + EnduserIDKey: semconv.EnduserIDKey, + HTTPClientIPKey: semconv.HTTPClientIPKey, + HTTPFlavorKey: semconv.HTTPFlavorKey, + HTTPMethodKey: semconv.HTTPMethodKey, + HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, + HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, + HTTPRouteKey: semconv.HTTPRouteKey, + HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, + HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, + HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, + HTTPTargetKey: semconv.HTTPTargetKey, + HTTPURLKey: semconv.HTTPURLKey, + HTTPUserAgentKey: semconv.HTTPUserAgentKey, +} + +// ClientResponse returns attributes for an HTTP response received by a client +// from a server. The following attributes are returned if the related values +// are defined in resp: "http.status.code", "http.response_content_length". 
+// +// This does not add all OpenTelemetry required attributes for an HTTP event, +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. If a complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(ClientResponse(resp), ClientRequest(resp.Request)...) +func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { + var n int + if resp.StatusCode > 0 { + n++ + } + if resp.ContentLength > 0 { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + if resp.StatusCode > 0 { + attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) + } + if resp.ContentLength > 0 { + attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) + } + return attrs +} + +// ClientRequest returns attributes for an HTTP request made by a client. The +// following attributes are always returned: "http.url", "http.flavor", +// "http.method", "net.peer.name". The following attributes are returned if the +// related values are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "enduser.id". +func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { + n := 3 // URL, peer name, proto, and method. + var h string + if req.URL != nil { + h = req.URL.Host + } + peer, p := firstHostPort(h, req.Header.Get("Host")) + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) + if port > 0 { + n++ + } + useragent := req.UserAgent() + if useragent != "" { + n++ + } + if req.ContentLength > 0 { + n++ + } + userID, _, hasUserID := req.BasicAuth() + if hasUserID { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.method(req.Method)) + attrs = append(attrs, c.flavor(req.Proto)) + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, c.HTTPURLKey.String(u)) + + attrs = append(attrs, c.NetConv.PeerName(peer)) + if port > 0 { + attrs = append(attrs, c.NetConv.PeerPort(port)) + } + + if useragent != "" { + attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + } + + if l := req.ContentLength; l > 0 { + attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) + } + + if hasUserID { + attrs = append(attrs, c.EnduserIDKey.String(userID)) + } + + return attrs +} + +// ServerRequest returns attributes for an HTTP request received by a server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "http.target", "net.host.name". 
The following attributes are +// returned if they related values are defined in req: "net.host.port", +// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", +// "http.client_ip". +func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { + // TODO: This currently does not add the specification required + // `http.target` attribute. It has too high of a cardinality to safely be + // added. An alternate should be added, or this comment removed, when it is + // addressed by the specification. If it is ultimately decided to continue + // not including the attribute, the HTTPTargetKey field of the httpConv + // should be removed as well. + + n := 4 // Method, scheme, proto, and host name. + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + peer, peerPort := splitHostPort(req.RemoteAddr) + if peer != "" { + n++ + if peerPort > 0 { + n++ + } + } + useragent := req.UserAgent() + if useragent != "" { + n++ + } + userID, _, hasUserID := req.BasicAuth() + if hasUserID { + n++ + } + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.method(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.flavor(req.Proto)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + + if peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) + if peerPort > 0 { + attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) + } + } + + if useragent != "" { + attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + } + + if hasUserID { + attrs = append(attrs, c.EnduserIDKey.String(userID)) + } + + if clientIP != "" { + attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) + } + + return attrs +} + +// ServerRequestMetrics returns metric attributes for an HTTP request received +// by a server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "net.host.name". The following attributes are +// returned if they related values are defined in req: "net.host.port". 
+func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { + // TODO: This currently does not add the specification required + // `http.target` attribute. It has too high of a cardinality to safely be + // added. An alternate should be added, or this comment removed, when it is + // addressed by the specification. If it is ultimately decided to continue + // not including the attribute, the HTTPTargetKey field of the httpConv + // should be removed as well. + + n := 4 // Method, scheme, proto, and host name. + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.methodMetric(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.flavor(req.Proto)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + + return attrs +} + +func (c *httpConv) method(method string) attribute.KeyValue { + if method == "" { + return c.HTTPMethodKey.String(http.MethodGet) + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return c.HTTPSchemeHTTPS + } + return c.HTTPSchemeHTTP +} + +func (c *httpConv) flavor(proto string) attribute.KeyValue { + switch proto { + case "HTTP/1.0": + return c.HTTPFlavorKey.String("1.0") + case "HTTP/1.1": + return c.HTTPFlavorKey.String("1.1") + case "HTTP/2": + return c.HTTPFlavorKey.String("2.0") + case "HTTP/3": + return c.HTTPFlavorKey.String("3.0") + default: + return c.HTTPFlavorKey.String(proto) + } +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +// Return the request host and port from the first non-empty source. +func firstHostPort(source ...string) (host string, port int) { + for _, hostport := range source { + host, port = splitHostPort(hostport) + if host != "" || port > 0 { + break + } + } + return +} + +// RequestHeader returns the contents of h as OpenTelemetry attributes. +func (c *httpConv) RequestHeader(h http.Header) []attribute.KeyValue { + return c.header("http.request.header", h) +} + +// ResponseHeader returns the contents of h as OpenTelemetry attributes. 
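// As an illustration (not part of the vendored code): a header such as
//
//	http.Header{"Content-Type": {"application/json"}}
//
// passed to RequestHeader is reported under the attribute key
// "http.request.header.content_type" with the value ["application/json"],
// following the lowercasing and "-" to "_" replacement performed by the
// header helper below.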
+func (c *httpConv) ResponseHeader(h http.Header) []attribute.KeyValue { + return c.header("http.response.header", h) +} + +func (c *httpConv) header(prefix string, h http.Header) []attribute.KeyValue { + key := func(k string) attribute.Key { + k = strings.ToLower(k) + k = strings.ReplaceAll(k, "-", "_") + k = fmt.Sprintf("%s.%s", prefix, k) + return attribute.Key(k) + } + + attrs := make([]attribute.KeyValue, 0, len(h)) + for k, v := range h { + attrs = append(attrs, key(k).StringSlice(v)) + } + return attrs +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func (c *httpConv) ClientStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func (c *httpConv) ServerStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go new file mode 100644 index 00000000000..bde8893437d --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -0,0 +1,368 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconvutil/netconv.go.tmpl + +// Copyright The OpenTelemetry Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +import ( + "net" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +// NetTransport returns a trace attribute describing the transport protocol of the +// passed network. See the net.Dial for information about acceptable network +// values. +func NetTransport(network string) attribute.KeyValue { + return nc.Transport(network) +} + +// NetClient returns trace attributes for a client network connection to address. +// See net.Dial for information about acceptable address values, address should +// be the same as the one used to create conn. If conn is nil, only network +// peer attributes will be returned that describe address. Otherwise, the +// socket level information about conn will also be included. +func NetClient(address string, conn net.Conn) []attribute.KeyValue { + return nc.Client(address, conn) +} + +// NetServer returns trace attributes for a network listener listening at address. 
+// See net.Listen for information about acceptable address values, address +// should be the same as the one used to create ln. If ln is nil, only network +// host attributes will be returned that describe address. Otherwise, the +// socket level information about ln will also be included. +func NetServer(address string, ln net.Listener) []attribute.KeyValue { + return nc.Server(address, ln) +} + +// netConv are the network semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type netConv struct { + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetSockFamilyKey attribute.Key + NetSockPeerAddrKey attribute.Key + NetSockPeerPortKey attribute.Key + NetSockHostAddrKey attribute.Key + NetSockHostPortKey attribute.Key + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportInProc attribute.KeyValue +} + +var nc = &netConv{ + NetHostNameKey: semconv.NetHostNameKey, + NetHostPortKey: semconv.NetHostPortKey, + NetPeerNameKey: semconv.NetPeerNameKey, + NetPeerPortKey: semconv.NetPeerPortKey, + NetSockFamilyKey: semconv.NetSockFamilyKey, + NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, + NetSockPeerPortKey: semconv.NetSockPeerPortKey, + NetSockHostAddrKey: semconv.NetSockHostAddrKey, + NetSockHostPortKey: semconv.NetSockHostPortKey, + NetTransportOther: semconv.NetTransportOther, + NetTransportTCP: semconv.NetTransportTCP, + NetTransportUDP: semconv.NetTransportUDP, + NetTransportInProc: semconv.NetTransportInProc, +} + +func (c *netConv) Transport(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return c.NetTransportTCP + case "udp", "udp4", "udp6": + return c.NetTransportUDP + case "unix", "unixgram", "unixpacket": + return c.NetTransportInProc + default: + // "ip:*", "ip4:*", and "ip6:*" all are considered other. + return c.NetTransportOther + } +} + +// Host returns attributes for a network host address. +func (c *netConv) Host(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.HostName(h)) + if p > 0 { + attrs = append(attrs, c.HostPort(int(p))) + } + return attrs +} + +// Server returns attributes for a network listener listening at address. See +// net.Listen for information about acceptable address values, address should +// be the same as the one used to create ln. If ln is nil, only network host +// attributes will be returned that describe address. Otherwise, the socket +// level information about ln will also be included. +func (c *netConv) Server(address string, ln net.Listener) []attribute.KeyValue { + if ln == nil { + return c.Host(address) + } + + lAddr := ln.Addr() + if lAddr == nil { + return c.Host(address) + } + + hostName, hostPort := splitHostPort(address) + sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) + network := lAddr.Network() + sockFamily := family(network, sockHostAddr) + + n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) + n += positiveInt(hostPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if hostName != "" { + attr = append(attr, c.HostName(hostName)) + if hostPort > 0 { + // Only if net.host.name is set should net.host.port be. 
+ attr = append(attr, c.HostPort(hostPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. + attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func (c *netConv) HostName(name string) attribute.KeyValue { + return c.NetHostNameKey.String(name) +} + +func (c *netConv) HostPort(port int) attribute.KeyValue { + return c.NetHostPortKey.Int(port) +} + +// Client returns attributes for a client network connection to address. See +// net.Dial for information about acceptable address values, address should be +// the same as the one used to create conn. If conn is nil, only network peer +// attributes will be returned that describe address. Otherwise, the socket +// level information about conn will also be included. +func (c *netConv) Client(address string, conn net.Conn) []attribute.KeyValue { + if conn == nil { + return c.Peer(address) + } + + lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() + + var network string + switch { + case lAddr != nil: + network = lAddr.Network() + case rAddr != nil: + network = rAddr.Network() + default: + return c.Peer(address) + } + + peerName, peerPort := splitHostPort(address) + var ( + sockFamily string + sockPeerAddr string + sockPeerPort int + sockHostAddr string + sockHostPort int + ) + + if lAddr != nil { + sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) + } + + if rAddr != nil { + sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) + } + + switch { + case sockHostAddr != "": + sockFamily = family(network, sockHostAddr) + case sockPeerAddr != "": + sockFamily = family(network, sockPeerAddr) + } + + n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) + n += positiveInt(peerPort, sockPeerPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if peerName != "" { + attr = append(attr, c.PeerName(peerName)) + if peerPort > 0 { + // Only if net.peer.name is set should net.peer.port be. + attr = append(attr, c.PeerPort(peerPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockPeerAddr != "" { + attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) + if sockPeerPort > 0 { + // Only if net.sock.peer.addr is set should net.sock.peer.port be. + attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) + } + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. + attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func family(network, address string) string { + switch network { + case "unix", "unixgram", "unixpacket": + return "unix" + default: + if ip := net.ParseIP(address); ip != nil { + if ip.To4() == nil { + return "inet6" + } + return "inet" + } + } + return "" +} + +func nonZeroStr(strs ...string) int { + var n int + for _, str := range strs { + if str != "" { + n++ + } + } + return n +} + +func positiveInt(ints ...int) int { + var n int + for _, i := range ints { + if i > 0 { + n++ + } + } + return n +} + +// Peer returns attributes for a network peer address. 
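// For instance (an illustrative note, not part of the upstream file): Peer
// applied to "opentelemetry.io:443" yields net.peer.name "opentelemetry.io"
// and net.peer.port 443, while a bare "opentelemetry.io" yields only
// net.peer.name, per the splitHostPort helper defined later in this file.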
+func (c *netConv) Peer(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.PeerName(h)) + if p > 0 { + attrs = append(attrs, c.PeerPort(int(p))) + } + return attrs +} + +func (c *netConv) PeerName(name string) attribute.KeyValue { + return c.NetPeerNameKey.String(name) +} + +func (c *netConv) PeerPort(port int) attribute.KeyValue { + return c.NetPeerPortKey.Int(port) +} + +func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue { + return c.NetSockPeerAddrKey.String(addr) +} + +func (c *netConv) SockPeerPort(port int) attribute.KeyValue { + return c.NetSockPeerPortKey.Int(port) +} + +// splitHostPort splits a network address hostport of the form "host", +// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", +// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and +// port. +// +// An empty host is returned if it is not provided or unparsable. A negative +// port is returned if it is not provided or unparsable. +func splitHostPort(hostport string) (host string, port int) { + port = -1 + + if strings.HasPrefix(hostport, "[") { + addrEnd := strings.LastIndex(hostport, "]") + if addrEnd < 0 { + // Invalid hostport. + return + } + if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + host = hostport[1:addrEnd] + return + } + } else { + if i := strings.LastIndex(hostport, ":"); i < 0 { + host = hostport + return + } + } + + host, pStr, err := net.SplitHostPort(hostport) + if err != nil { + return + } + + p, err := strconv.ParseUint(pStr, 10, 16) + if err != nil { + return + } + return host, int(p) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go new file mode 100644 index 00000000000..26a51a18050 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -0,0 +1,65 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" +) + +// Labeler is used to allow instrumented HTTP handlers to add custom attributes to +// the metrics recorded by the net/http instrumentation. +type Labeler struct { + mu sync.Mutex + attributes []attribute.KeyValue +} + +// Add attributes to a Labeler. +func (l *Labeler) Add(ls ...attribute.KeyValue) { + l.mu.Lock() + defer l.mu.Unlock() + l.attributes = append(l.attributes, ls...) +} + +// Get returns a copy of the attributes added to the Labeler. 
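// A handler-side sketch (illustrative; the "tenant" key and X-Tenant header
// are assumptions) showing how code running under the otelhttp middleware can
// attach extra metric attributes via the Labeler injected into the request
// context by the handler instrumentation:
//
//	func tenantHandler(w http.ResponseWriter, r *http.Request) {
//		labeler, _ := otelhttp.LabelerFromContext(r.Context())
//		labeler.Add(attribute.String("tenant", r.Header.Get("X-Tenant")))
//		w.WriteHeader(http.StatusOK)
//	}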
+func (l *Labeler) Get() []attribute.KeyValue { + l.mu.Lock() + defer l.mu.Unlock() + ret := make([]attribute.KeyValue, len(l.attributes)) + copy(ret, l.attributes) + return ret +} + +type labelerContextKeyType int + +const lablelerContextKey labelerContextKeyType = 0 + +func injectLabeler(ctx context.Context, l *Labeler) context.Context { + return context.WithValue(ctx, lablelerContextKey, l) +} + +// LabelerFromContext retrieves a Labeler instance from the provided context if +// one is available. If no Labeler was found in the provided context a new, empty +// Labeler is returned and the second return value is false. In this case it is +// safe to use the Labeler but any attributes added to it will not be used. +func LabelerFromContext(ctx context.Context) (*Labeler, bool) { + l, ok := ctx.Value(lablelerContextKey).(*Labeler) + if !ok { + l = &Labeler{} + } + return l, ok +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go new file mode 100644 index 00000000000..e835cac12e4 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -0,0 +1,193 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + "net/http/httptrace" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +// Transport implements the http.RoundTripper interface and wraps +// outbound HTTP(S) requests with a span. +type Transport struct { + rt http.RoundTripper + + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace +} + +var _ http.RoundTripper = &Transport{} + +// NewTransport wraps the provided http.RoundTripper with one that +// starts a span and injects the span context into the outbound request headers. +// +// If the provided http.RoundTripper is nil, http.DefaultTransport will be used +// as the base http.RoundTripper. +func NewTransport(base http.RoundTripper, opts ...Option) *Transport { + if base == nil { + base = http.DefaultTransport + } + + t := Transport{ + rt: base, + } + + defaultOpts := []Option{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)), + WithSpanNameFormatter(defaultTransportFormatter), + } + + c := newConfig(append(defaultOpts, opts...)...) 
+ t.applyConfig(c) + + return &t +} + +func (t *Transport) applyConfig(c *config) { + t.tracer = c.Tracer + t.propagators = c.Propagators + t.spanStartOptions = c.SpanStartOptions + t.filters = c.Filters + t.spanNameFormatter = c.SpanNameFormatter + t.clientTrace = c.ClientTrace +} + +func defaultTransportFormatter(_ string, r *http.Request) string { + return "HTTP " + r.Method +} + +// RoundTrip creates a Span and propagates its context via the provided request's headers +// before handing the request to the configured base RoundTripper. The created span will +// end when the response body is closed or when a read from the body returns io.EOF. +func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { + for _, f := range t.filters { + if !f(r) { + // Simply pass through to the base RoundTripper if a filter rejects the request + return t.rt.RoundTrip(r) + } + } + + tracer := t.tracer + + if tracer == nil { + if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options + + ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...) + + if t.clientTrace != nil { + ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) + } + + r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. + span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) + + res, err := t.rt.RoundTrip(r) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + span.End() + return res, err + } + + span.SetAttributes(semconvutil.HTTPClientResponse(res)...) + span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + res.Body = newWrappedBody(span, res.Body) + + return res, err +} + +// newWrappedBody returns a new and appropriately scoped *wrappedBody as an +// io.ReadCloser. If the passed body implements io.Writer, the returned value +// will implement io.ReadWriteCloser. +func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser { + // The successful protocol switch responses will have a body that + // implement an io.ReadWriteCloser. Ensure this interface type continues + // to be satisfied if that is the case. + if _, ok := body.(io.ReadWriteCloser); ok { + return &wrappedBody{span: span, body: body} + } + + // Remove the implementation of the io.ReadWriteCloser and only implement + // the io.ReadCloser. + return struct{ io.ReadCloser }{&wrappedBody{span: span, body: body}} +} + +// wrappedBody is the response body type returned by the transport +// instrumentation to complete a span. Errors encountered when using the +// response body are recorded in span tracking the response. +// +// The span tracking the response is ended when this body is closed. +// +// If the response body implements the io.Writer interface (i.e. for +// successful protocol switches), the wrapped body also will. +type wrappedBody struct { + span trace.Span + body io.ReadCloser +} + +var _ io.ReadWriteCloser = &wrappedBody{} + +func (wb *wrappedBody) Write(p []byte) (int, error) { + // This will not panic given the guard in newWrappedBody. 
+ n, err := wb.body.(io.Writer).Write(p) + if err != nil { + wb.span.RecordError(err) + wb.span.SetStatus(codes.Error, err.Error()) + } + return n, err +} + +func (wb *wrappedBody) Read(b []byte) (int, error) { + n, err := wb.body.Read(b) + + switch err { + case nil: + // nothing to do here but fall through to the return + case io.EOF: + wb.span.End() + default: + wb.span.RecordError(err) + wb.span.SetStatus(codes.Error, err.Error()) + } + return n, err +} + +func (wb *wrappedBody) Close() error { + wb.span.End() + if wb.body != nil { + return wb.body.Close() + } + return nil +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go new file mode 100644 index 00000000000..bd41c180421 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +// Version is the current release version of the otelhttp instrumentation. +func Version() string { + return "0.46.1" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +// +// Deprecated: Use [Version] instead. +func SemVersion() string { + return Version() +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go new file mode 100644 index 00000000000..11a35ed167f --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go @@ -0,0 +1,99 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + + "go.opentelemetry.io/otel/propagation" +) + +var _ io.ReadCloser = &bodyWrapper{} + +// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. 
+type bodyWrapper struct { + io.ReadCloser + record func(n int64) // must not be nil + + read int64 + err error +} + +func (w *bodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + w.read += n1 + w.err = err + w.record(n1) + return n, err +} + +func (w *bodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +var _ http.ResponseWriter = &respWriterWrapper{} + +// respWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode. +// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) +// that may be useful when using it in real life situations. +type respWriterWrapper struct { + http.ResponseWriter + record func(n int64) // must not be nil + + // used to inject the header + ctx context.Context + + props propagation.TextMapPropagator + + written int64 + statusCode int + err error + wroteHeader bool +} + +func (w *respWriterWrapper) Header() http.Header { + return w.ResponseWriter.Header() +} + +func (w *respWriterWrapper) Write(p []byte) (int, error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.record(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Blocking consecutive calls to WriteHeader alters expected behavior and will +// remove warning logs from net/http where developers will notice incorrect handler implementations. +func (w *respWriterWrapper) WriteHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +} diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index f3355c852be..895c7664beb 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -14,12 +14,9 @@ go.work.sum gen/ /example/dice/dice -/example/fib/fib -/example/fib/traces.txt -/example/jaeger/jaeger /example/namedtracer/namedtracer +/example/otel-collector/otel-collector /example/opencensus/opencensus /example/passthrough/passthrough /example/prometheus/prometheus /example/zipkin/zipkin -/example/otel-collector/otel-collector diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 6e8eeec00fa..a62511f382e 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -12,8 +12,9 @@ linters: - depguard - errcheck - godot - - gofmt + - gofumpt - goimports + - gosec - gosimple - govet - ineffassign @@ -53,6 +54,20 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive + # It's okay to not run gosec in a test. + - path: _test\.go + linters: + - gosec + # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # as we commonly use it in tests and examples. + - text: "G404:" + linters: + - gosec + # Igonoring gosec G402: TLS MinVersion too low + # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. + - text: "G402: TLS MinVersion too low." 
+ linters: + - gosec include: # revive exported should have comment or be unexported. - EXC0012 diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 3e5c35b5dcc..24874f856e3 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,85 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.21.0/0.44.0] 2023-11-16 + +### Removed + +- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706) +- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707) +- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708) +- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723) + +### Fixed + +- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719) +- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719) + +## [1.20.0/0.43.0] 2023-11-10 + +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. + +### Added + +- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567) +- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584) +- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620) +- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620) +- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644) +- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649) +- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603) +- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660) +- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660) +- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/sdk/metric/metricdata`. (#4622) +- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585) +- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605) +- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668) + +### Deprecated + +- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567) +- Deprecate `go.opentelemetry.io/otel/example/fib` package is in favor of `go.opentelemetry.io/otel/example/dice`. (#4618) +- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`. + Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. 
(#4620) +- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649) +- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693) + +### Changed + +- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) +- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. + This extends the `TracerProvider` interface and is is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. + This extends the `Tracer` interface and is is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. + This extends the `Span` interface and is is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) +- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670) +- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670) +- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669) +- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669) +- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679) +- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679) + +### Fixed + +- Fix improper parsing of characters such us `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as a whitespace. (#4667) +- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as a whitespace. 
(#4699) +- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as a whitespace. (#4699) +- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as a whitespace. (#4699) +- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracegrpc` as they were rendered as a whitespace. (#4699) +- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp` as they were rendered as a whitespace. (#4699) +- In `go.opentelemetry.op/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648) +- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695) +- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695) + ## [1.19.0/0.42.0/0.0.7] 2023-09-28 This release contains the first stable release of the OpenTelemetry Go [metric SDK]. @@ -2656,7 +2735,9 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.19.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD +[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 +[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 [1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 [1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 [1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 @@ -2731,7 +2812,7 @@ It contains api and sdk for trace and meter. [Go 1.20]: https://go.dev/doc/go1.20 [Go 1.19]: https://go.dev/doc/go1.19 [Go 1.18]: https://go.dev/doc/go1.18 -[Go 1.19]: https://go.dev/doc/go1.19 [metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric +[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index a00dbca7b08..850606ae692 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -90,6 +90,10 @@ git push Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull request ID to the entry you added to `CHANGELOG.md`. +Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request. +Rewriting Git history makes it difficult to keep track of iterations during code review. +All pull requests are squashed to a single commit upon merge to `main`. 
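Aside (editorial, not part of this patch): the CHANGELOG "Fixed" entries above about `+` and `/` being rendered as whitespace correspond to the `url.QueryUnescape` → `url.PathUnescape` change to `baggage.go` later in this patch. A minimal sketch of the behavioral difference, using only the Go standard library; the sample value is illustrative:

```go
// Sketch only: shows why QueryUnescape mangled baggage/header values.
// QueryUnescape treats '+' as an encoded space (form decoding), while
// PathUnescape leaves '+' intact, which is what baggage values need.
package main

import (
	"fmt"
	"net/url"
)

func main() {
	v := "prod+eu/west-1" // example baggage member value

	q, _ := url.QueryUnescape(v) // "prod eu/west-1" – '+' decoded to a space
	p, _ := url.PathUnescape(v)  // "prod+eu/west-1" – '+' preserved

	fmt.Println(q)
	fmt.Println(p)
}
```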
+ ### How to Receive Comments * If the PR is not ready for review, please put `[WIP]` in the title, diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 5c311706b0c..35fc189961b 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -77,6 +77,9 @@ $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl GORELEASE = $(TOOLS)/gorelease $(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease +GOVULNCHECK = $(TOOLS)/govulncheck +$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck + .PHONY: tools tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) @@ -189,6 +192,18 @@ test-coverage: | $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt +# Adding a directory will include all benchmarks in that direcotry if a filter is not specified. +BENCHMARK_TARGETS := sdk/trace +.PHONY: benchmark +benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) +BENCHMARK_FILTER = . +# You can override the filter for a particular directory by adding a rule here. +benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark/%: + @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + && cd $* \ + $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix golangci-lint-fix: golangci-lint @@ -216,7 +231,7 @@ go-mod-tidy/%: | crosslink lint-modules: go-mod-tidy .PHONY: lint -lint: misspell lint-modules golangci-lint +lint: misspell lint-modules golangci-lint govulncheck .PHONY: vanity-import-check vanity-import-check: | $(PORTO) @@ -226,6 +241,14 @@ vanity-import-check: | $(PORTO) misspell: | $(MISSPELL) @$(MISSPELL) -w $(ALL_DOCS) +.PHONY: govulncheck +govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) +govulncheck/%: DIR=$* +govulncheck/%: | $(GOVULNCHECK) + @echo "govulncheck ./... in $(DIR)" \ + && cd $(DIR) \ + && $(GOVULNCHECK) ./... + .PHONY: codespell codespell: | $(CODESPELL) @$(DOCKERPY) $(CODESPELL) @@ -289,3 +312,7 @@ COMMIT ?= "HEAD" add-tags: | $(MULTIMOD) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + +.PHONY: lint-markdown +lint-markdown: + docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 634326ef833..2c5b0cc28ab 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -11,16 +11,13 @@ It provides a set of APIs to directly measure performance and behavior of your s ## Project Status -| Signal | Status | Project | -|---------|------------|-----------------------| -| Traces | Stable | N/A | -| Metrics | Mixed [1] | [Go: Metric SDK (GA)] | -| Logs | Frozen [2] | N/A | +| Signal | Status | +|---------|------------| +| Traces | Stable | +| Metrics | Stable | +| Logs | Design [1] | -[Go: Metric SDK (GA)]: https://github.com/orgs/open-telemetry/projects/34 - -- [1]: [Metrics API](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is Stable. [Metrics SDK](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) is Beta. 
-- [2]: The Logs signal development is halted for this project while we stabilize the Metrics SDK. +- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)). No Logs Pull Requests are currently being accepted. Progress and status specific to this repository is tracked in our diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 9e6b3b7b52a..84532cb1da3 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -254,7 +254,7 @@ func NewMember(key, value string, props ...Property) (Member, error) { if err := m.validate(); err != nil { return newInvalidMember(), err } - decodedValue, err := url.QueryUnescape(value) + decodedValue, err := url.PathUnescape(value) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -301,7 +301,7 @@ func parseMember(member string) (Member, error) { // when converting the header into a data structure." key = strings.TrimSpace(k) var err error - value, err = url.QueryUnescape(strings.TrimSpace(v)) + value, err = url.PathUnescape(strings.TrimSpace(v)) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %q", err, value) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index a33eded872a..ebb13c20678 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -34,11 +34,13 @@ type afCounter struct { name string opts []metric.Float64ObservableCounterOption - delegate atomic.Value //metric.Float64ObservableCounter + delegate atomic.Value // metric.Float64ObservableCounter } -var _ unwrapper = (*afCounter)(nil) -var _ metric.Float64ObservableCounter = (*afCounter)(nil) +var ( + _ unwrapper = (*afCounter)(nil) + _ metric.Float64ObservableCounter = (*afCounter)(nil) +) func (i *afCounter) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableCounter(i.name, i.opts...) @@ -63,11 +65,13 @@ type afUpDownCounter struct { name string opts []metric.Float64ObservableUpDownCounterOption - delegate atomic.Value //metric.Float64ObservableUpDownCounter + delegate atomic.Value // metric.Float64ObservableUpDownCounter } -var _ unwrapper = (*afUpDownCounter)(nil) -var _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +var ( + _ unwrapper = (*afUpDownCounter)(nil) + _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +) func (i *afUpDownCounter) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) @@ -92,11 +96,13 @@ type afGauge struct { name string opts []metric.Float64ObservableGaugeOption - delegate atomic.Value //metric.Float64ObservableGauge + delegate atomic.Value // metric.Float64ObservableGauge } -var _ unwrapper = (*afGauge)(nil) -var _ metric.Float64ObservableGauge = (*afGauge)(nil) +var ( + _ unwrapper = (*afGauge)(nil) + _ metric.Float64ObservableGauge = (*afGauge)(nil) +) func (i *afGauge) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableGauge(i.name, i.opts...) 
@@ -121,11 +127,13 @@ type aiCounter struct { name string opts []metric.Int64ObservableCounterOption - delegate atomic.Value //metric.Int64ObservableCounter + delegate atomic.Value // metric.Int64ObservableCounter } -var _ unwrapper = (*aiCounter)(nil) -var _ metric.Int64ObservableCounter = (*aiCounter)(nil) +var ( + _ unwrapper = (*aiCounter)(nil) + _ metric.Int64ObservableCounter = (*aiCounter)(nil) +) func (i *aiCounter) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableCounter(i.name, i.opts...) @@ -150,11 +158,13 @@ type aiUpDownCounter struct { name string opts []metric.Int64ObservableUpDownCounterOption - delegate atomic.Value //metric.Int64ObservableUpDownCounter + delegate atomic.Value // metric.Int64ObservableUpDownCounter } -var _ unwrapper = (*aiUpDownCounter)(nil) -var _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +var ( + _ unwrapper = (*aiUpDownCounter)(nil) + _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +) func (i *aiUpDownCounter) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) @@ -179,11 +189,13 @@ type aiGauge struct { name string opts []metric.Int64ObservableGaugeOption - delegate atomic.Value //metric.Int64ObservableGauge + delegate atomic.Value // metric.Int64ObservableGauge } -var _ unwrapper = (*aiGauge)(nil) -var _ metric.Int64ObservableGauge = (*aiGauge)(nil) +var ( + _ unwrapper = (*aiGauge)(nil) + _ metric.Int64ObservableGauge = (*aiGauge)(nil) +) func (i *aiGauge) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableGauge(i.name, i.opts...) @@ -208,7 +220,7 @@ type sfCounter struct { name string opts []metric.Float64CounterOption - delegate atomic.Value //metric.Float64Counter + delegate atomic.Value // metric.Float64Counter } var _ metric.Float64Counter = (*sfCounter)(nil) @@ -234,7 +246,7 @@ type sfUpDownCounter struct { name string opts []metric.Float64UpDownCounterOption - delegate atomic.Value //metric.Float64UpDownCounter + delegate atomic.Value // metric.Float64UpDownCounter } var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) @@ -260,7 +272,7 @@ type sfHistogram struct { name string opts []metric.Float64HistogramOption - delegate atomic.Value //metric.Float64Histogram + delegate atomic.Value // metric.Float64Histogram } var _ metric.Float64Histogram = (*sfHistogram)(nil) @@ -286,7 +298,7 @@ type siCounter struct { name string opts []metric.Int64CounterOption - delegate atomic.Value //metric.Int64Counter + delegate atomic.Value // metric.Int64Counter } var _ metric.Int64Counter = (*siCounter)(nil) @@ -312,7 +324,7 @@ type siUpDownCounter struct { name string opts []metric.Int64UpDownCounterOption - delegate atomic.Value //metric.Int64UpDownCounter + delegate atomic.Value // metric.Int64UpDownCounter } var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) @@ -338,7 +350,7 @@ type siHistogram struct { name string opts []metric.Int64HistogramOption - delegate atomic.Value //metric.Int64Histogram + delegate atomic.Value // metric.Int64Histogram } var _ metric.Int64Histogram = (*siHistogram)(nil) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 5f008d0982b..3f61ec12a34 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -39,6 +39,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" + 
"go.opentelemetry.io/otel/trace/embedded" ) // tracerProvider is a placeholder for a configured SDK TracerProvider. @@ -46,6 +47,8 @@ import ( // All TracerProvider functionality is forwarded to a delegate once // configured. type tracerProvider struct { + embedded.TracerProvider + mtx sync.Mutex tracers map[il]*tracer delegate trace.TracerProvider @@ -119,6 +122,8 @@ type il struct { // All Tracer functionality is forwarded to a delegate once configured. // Otherwise, all functionality is forwarded to a NoopTracer. type tracer struct { + embedded.Tracer + name string opts []trace.TracerOption provider *tracerProvider @@ -156,6 +161,8 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart // SpanContext. It performs no operations other than to return the wrapped // SpanContext. type nonRecordingSpan struct { + embedded.Span + sc trace.SpanContext tracer *tracer } diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index ae24e448d91..54716e13b35 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -149,7 +149,7 @@ of [go.opentelemetry.io/otel/metric]. Finally, an author can embed another implementation in theirs. The embedded implementation will be used for methods not defined by the author. For example, -an author who want to default to silently dropping the call can use +an author who wants to default to silently dropping the call can use [go.opentelemetry.io/otel/metric/noop]: import "go.opentelemetry.io/otel/metric/noop" diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index cdca00058c6..be89cd53341 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -39,6 +39,12 @@ type InstrumentOption interface { Float64ObservableGaugeOption } +// HistogramOption applies options to histogram instruments. +type HistogramOption interface { + Int64HistogramOption + Float64HistogramOption +} + type descOpt string func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { @@ -171,6 +177,23 @@ func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64Ob // The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code. func WithUnit(u string) InstrumentOption { return unitOpt(u) } +// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries. +// +// This option is considered "advisory", and may be ignored by API implementations. +func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) } + +type bucketOpt []float64 + +func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + +func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + // AddOption applies options to an addition measurement. See // [MeasurementOption] for other options that can be used as an AddOption. 
type AddOption interface { diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index f0b063721d8..0a4825ae6a7 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -147,8 +147,9 @@ type Float64Histogram interface { // Float64HistogramConfig contains options for synchronous counter instruments // that record int64 values. type Float64HistogramConfig struct { - description string - unit string + description string + unit string + explicitBucketBoundaries []float64 } // NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all @@ -171,6 +172,11 @@ func (c Float64HistogramConfig) Unit() string { return c.unit } +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. +func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + // Float64HistogramOption applies options to a [Float64HistogramConfig]. See // [InstrumentOption] for other options that can be used as a // Float64HistogramOption. diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go index 6f508eb66d4..56667d32fc0 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -147,8 +147,9 @@ type Int64Histogram interface { // Int64HistogramConfig contains options for synchronous counter instruments // that record int64 values. type Int64HistogramConfig struct { - description string - unit string + description string + unit string + explicitBucketBoundaries []float64 } // NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts @@ -171,6 +172,11 @@ func (c Int64HistogramConfig) Unit() string { return c.unit } +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. +func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + // Int64HistogramOption applies options to a [Int64HistogramConfig]. See // [InstrumentOption] for other options that can be used as an // Int64HistogramOption. diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 902692da082..75a8f3435a5 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -40,8 +40,10 @@ const ( // their proprietary information. type TraceContext struct{} -var _ TextMapPropagator = TraceContext{} -var traceCtxRegExp = regexp.MustCompile("^(?P[0-9a-f]{2})-(?P[a-f0-9]{32})-(?P[a-f0-9]{16})-(?P[a-f0-9]{2})(?:-.*)?$") +var ( + _ TextMapPropagator = TraceContext{} + traceCtxRegExp = regexp.MustCompile("^(?P[0-9a-f]{2})-(?P[a-f0-9]{32})-(?P[a-f0-9]{16})-(?P[a-f0-9]{2})(?:-.*)?$") +) // Inject set tracecontext from the Context into the carrier. 
func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index ddff454685c..e0a43e13840 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.5 +codespell==2.2.6 diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go new file mode 100644 index 00000000000..71a1f7748d5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.17.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go new file mode 100644 index 00000000000..679c40c4de4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It represents the sHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` maybe be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It represents the sHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// RPC received/sent message. +const ( + // MessageTypeKey is the attribute Key conforming to the "message.type" + // semantic conventions. It represents the whether this is a received or + // sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessageTypeKey = attribute.Key("message.type") + + // MessageIDKey is the attribute Key conforming to the "message.id" + // semantic conventions. It represents the mUST be calculated as two + // different counters starting from `1` one for sent messages and one for + // received message. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Note: This way we guarantee that the values will be consistent between + // different implementations. + MessageIDKey = attribute.Key("message.id") + + // MessageCompressedSizeKey is the attribute Key conforming to the + // "message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageCompressedSizeKey = attribute.Key("message.compressed_size") + + // MessageUncompressedSizeKey is the attribute Key conforming to the + // "message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +var ( + // sent + MessageTypeSent = MessageTypeKey.String("SENT") + // received + MessageTypeReceived = MessageTypeKey.String("RECEIVED") +) + +// MessageID returns an attribute KeyValue conforming to the "message.id" +// semantic conventions. It represents the mUST be calculated as two different +// counters starting from `1` one for sent messages and one for received +// message. +func MessageID(val int) attribute.KeyValue { + return MessageIDKey.Int(val) +} + +// MessageCompressedSize returns an attribute KeyValue conforming to the +// "message.compressed_size" semantic conventions. It represents the compressed +// size of the message in bytes. +func MessageCompressedSize(val int) attribute.KeyValue { + return MessageCompressedSizeKey.Int(val) +} + +// MessageUncompressedSize returns an attribute KeyValue conforming to the +// "message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func MessageUncompressedSize(val int) attribute.KeyValue { + return MessageUncompressedSizeKey.Int(val) +} + +// The attributes used to report a single exception associated with a span. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It represents the sHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example above](#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It represents the sHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. 
+func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go new file mode 100644 index 00000000000..9b8c559de42 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go new file mode 100644 index 00000000000..d5c4b5c136a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go new file mode 100644 index 00000000000..39a2eab3a6a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -0,0 +1,2010 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. 
The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserUserAgentKey is the attribute Key conforming to the + // "browser.user_agent" semantic conventions. It represents the full + // user-agent string provided by the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) + // AppleWebKit/537.36 (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do + // not have a mechanism to retrieve brands and platform individually from + // the User-Agent Client Hints API. To retrieve the value, the legacy + // `navigator.userAgent` API can be used. + BrowserUserAgentKey = attribute.Key("browser.user_agent") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserUserAgent returns an attribute KeyValue conforming to the +// "browser.user_agent" semantic conventions. It represents the full user-agent +// string provided by the browser +func BrowserUserAgent(val string) attribute.KeyValue { + return BrowserUserAgentKey.String(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://intl.cloud.tencent.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. 
It represents the availability zone in which the resource is running.
+ // Cloud regions often have multiple, isolated locations known as zones to
+ // increase availability.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone in which the resource is running. Cloud regions often
+// have multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
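+
+// Editorial aside, not part of the upstream generated conventions: a minimal
+// usage sketch combining the cloud attribute constructors and enum values
+// defined above into a plain attribute list, for example to describe an EC2
+// instance. The function name is illustrative only; in practice such a list
+// is typically passed to a resource constructor such as
+// resource.NewWithAttributes from go.opentelemetry.io/otel/sdk/resource.
+func exampleCloudAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  CloudProviderAWS,
+  CloudPlatformAWSEC2,
+  CloudRegion("us-east-1"),
+  CloudAvailabilityZone("us-east-1c"),
+  CloudAccountID("111111111111"),
+ }
+}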
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. 
+const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). 
+func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. 
+func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. 
It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `faas.id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic + // conventions. It represents the unique ID of the single function that + // this runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so consider setting `faas.id` as a span attribute instead. 
+ // + // The exact value to use for `faas.id` depends on the cloud provider: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + FaaSIDKey = attribute.Key("faas.id") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function in MiB. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. 
+func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic +// conventions. It represents the unique ID of the single function that this +// runtime instance executes. +func FaaSID(val string) attribute.KeyValue { + return FaaSIDKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function in MiB. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // Linux systems, the `machine-id` located in `/etc/machine-id` or + // `/var/lib/dbus/machine-id` may be used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID. For Cloud, this + // value is from the provider. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized Linux +// systems, the `machine-id` located in `/etc/machine-id` or +// `/var/lib/dbus/machine-id` may be used. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. 
+func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) 
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otecol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
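+
+// Editorial aside, not part of the upstream generated conventions: a minimal
+// usage sketch of the process.* constructors above, filled with the
+// documented example values. The function name and values are illustrative
+// only; in practice they would be discovered from the running process (for
+// example via the standard library os package) by a resource detector.
+func exampleProcessAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ProcessPID(1234),
+  ProcessExecutableName("otelcol"),
+  ProcessCommandArgs("cmd/otelcol", "--config=config.yaml"),
+  ProcessOwner("root"),
+ }
+}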
+
+// The single (language) runtime instance which is monitored.
+const (
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. 
instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. 
+func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OtelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelScopeNameKey = attribute.Key("otel.scope.name") + + // OtelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OtelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OtelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OtelScopeName(val string) attribute.KeyValue { + return OtelScopeNameKey.String(val) +} + +// OtelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OtelScopeVersion(val string) attribute.KeyValue { + return OtelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OtelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. It represents the deprecated, + // use the `otel.scope.name` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelLibraryNameKey = attribute.Key("otel.library.name") + + // OtelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. It represents the + // deprecated, use the `otel.scope.version` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OtelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OtelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It represents the deprecated, use +// the `otel.scope.name` attribute. +func OtelLibraryName(val string) attribute.KeyValue { + return OtelLibraryNameKey.String(val) +} + +// OtelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It represents the deprecated, +// use the `otel.scope.version` attribute. 
+func OtelLibraryVersion(val string) attribute.KeyValue { + return OtelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go new file mode 100644 index 00000000000..42fc525d165 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go new file mode 100644 index 00000000000..8c4a7299d27 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -0,0 +1,3375 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. 
It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the name identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + + // EventDomainKey is the attribute Key conforming to the "event.domain" + // semantic conventions. It represents the domain identifies the business + // context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have same `event.name`, yet be + // unrelated events. + EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the name identifies the event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The attributes used to perform database client calls. +const ( + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. 
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the this attribute is used to report the name + // of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable and not + // explicitly disabled via instrumentation configuration.) + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. 
If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") +) + +// DBConnectionString returns 
an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the this attribute is used to report the name of +// the database being accessed. For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// Connection-level attributes for Microsoft SQL Server +const ( + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no + // longer required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// Call-level attributes for Cassandra +const ( + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary table that the operation is acting upon, including the keyspace + // name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary table that the operation is acting upon, including the keyspace name +// (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. 
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OtelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OtelStatusCodeKey = attribute.Key("otel.status_code") + + // OtelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OtelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OtelStatusCodeOk = OtelStatusCodeKey.String("OK") + // The operation contains an error + OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") +) + +// OtelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OtelStatusDescription(val string) attribute.KeyValue { + return OtelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function execution. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" + // semantic conventions. It represents the execution ID of the current + // function execution. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSExecution returns an attribute KeyValue conforming to the +// "faas.execution" semantic conventions. It represents the execution ID of the +// current function execution. +func FaaSExecution(val string) attribute.KeyValue { + return FaaSExecutionKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. 
In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + + // NetAppProtocolNameKey is the attribute Key conforming to the + // "net.app.protocol.name" semantic conventions. It represents the + // application layer protocol used. The value SHOULD be normalized to + // lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") + + // NetAppProtocolVersionKey is the attribute Key conforming to the + // "net.app.protocol.version" semantic conventions. It represents the + // version of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.app.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client used has a version of `0.27.2`, but sends HTTP version + // `1.1`, this attribute should be set to `1.1`. + NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. 
It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. 
+	//
+	// Type: int
+	// RequirementLevel: Recommended (If defined for the address family and if
+	// different than `net.host.port` and if `net.sock.host.addr` is set.)
+	// Stability: stable
+	// Examples: 35555
+	NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+	// NetHostConnectionTypeKey is the attribute Key conforming to the
+	// "net.host.connection.type" semantic conventions. It represents the
+	// internet connection type currently being used by the host.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'wifi'
+	NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+	// NetHostConnectionSubtypeKey is the attribute Key conforming to the
+	// "net.host.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection.type. It may be the type of cell
+	// technology connection, but it could be used for describing details
+	// about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'LTE'
+	NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+	// NetHostCarrierNameKey is the attribute Key conforming to the
+	// "net.host.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'sprint'
+	NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+	// NetHostCarrierMccKey is the attribute Key conforming to the
+	// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+	// carrier country code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '310'
+	NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+	// NetHostCarrierMncKey is the attribute Key conforming to the
+	// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+	// carrier network code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '001'
+	NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+	// NetHostCarrierIccKey is the attribute Key conforming to the
+	// "net.host.carrier.icc" semantic conventions. It represents the ISO
+	// 3166-1 alpha-2 2-character country code associated with the mobile
+	// carrier network.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'DE'
+	NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+	// ip_tcp
+	NetTransportTCP = NetTransportKey.String("ip_tcp")
+	// ip_udp
+	NetTransportUDP = NetTransportKey.String("ip_udp")
+	// Named or anonymous pipe.
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetAppProtocolName returns an attribute KeyValue conforming to the +// "net.app.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetAppProtocolName(val string) attribute.KeyValue { + return NetAppProtocolNameKey.String(val) +} + +// NetAppProtocolVersion returns an attribute KeyValue conforming to the +// "net.app.protocol.version" semantic conventions. It represents the version +// of the application layer protocol used. See note below. 
+func NetAppProtocolVersion(val string) attribute.KeyValue { + return NetAppProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. 
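// Illustrative sketch, not part of the vendored file: one way the net.*
// constructors defined above could be combined for an outbound connection.
// The function name and example values are hypothetical; attribute.KeyValue
// comes from this package's existing "go.opentelemetry.io/otel/attribute"
// import.
func exampleClientNetworkAttrs(peerName string, peerPort int, sockAddr string, sockPort int) []attribute.KeyValue {
	return []attribute.KeyValue{
		NetTransportTCP,              // enum member defined above ("ip_tcp")
		NetAppProtocolName("http"),   // application-layer protocol, normalized to lowercase
		NetAppProtocolVersion("1.1"), // protocol version actually sent on the wire
		NetPeerName(peerName),        // logical remote host, e.g. "example.com"
		NetPeerPort(peerPort),        // logical remote port, e.g. 443
		NetSockFamilyInet,            // address family, "inet" for IPv4
		NetSockPeerAddr(sockAddr),    // socket-level peer address, e.g. "93.184.216.34"
		NetSockPeerPort(sockPort),
	}
}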
+func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
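// Illustrative sketch, not part of the vendored file: attaching the enduser.*
// attributes described above to an existing server span. The helper name and
// parameters are hypothetical, and an additional
// "go.opentelemetry.io/otel/trace" import is assumed for trace.Span.
func exampleRecordEnduser(span trace.Span, userID, role, scope string) {
	// The values would typically be extracted from the validated access token
	// or Authorization header of the inbound request.
	span.SetAttributes(
		EnduserID(userID),   // e.g. "username"
		EnduserRole(role),   // e.g. "admin"
		EnduserScope(scope), // e.g. "read:message write:files"
	)
}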
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). 
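// Illustrative sketch, not part of the vendored file: populating the code.*
// attributes above from the Go runtime. The helper name is hypothetical and a
// standard-library "runtime" import is assumed.
func exampleCodeAttrs() []attribute.KeyValue {
	pc, file, line, ok := runtime.Caller(1) // describe the caller of this helper
	if !ok {
		return nil
	}
	attrs := []attribute.KeyValue{
		CodeFilepath(file),
		CodeLineNumber(line),
	}
	if fn := runtime.FuncForPC(pc); fn != nil {
		// Name() is fully qualified (e.g. "main.serveRequest"); splitting it
		// into code.namespace and code.function is omitted here for brevity.
		attrs = append(attrs, CodeFunction(fn.Name()))
	}
	return attrs
}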
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the hTTP request method. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. It represents the [HTTP + // response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: ConditionallyRequired (If and only if one was + // received/sent.) 
+ // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" + // semantic conventions. It represents the kind of HTTP protocol used. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: If `net.transport` is not specified, it can be assumed to be + // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is + // assumed. + HTTPFlavorKey = attribute.Key("http.flavor") + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. It represents the value of the + // [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + HTTPUserAgentKey = attribute.Key("http.user_agent") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. It represents the + // size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +var ( + // HTTP/1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. It represents the hTTP request method. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. It represents the [HTTP response +// status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. It represents the value of the [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. 
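// Illustrative sketch, not part of the vendored file: deriving the common HTTP
// attributes above from an incoming request and the status code written by the
// handler. The helper name is hypothetical and a standard-library "net/http"
// import is assumed.
func exampleServerHTTPAttrs(req *http.Request, status int) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		HTTPMethod(req.Method),         // e.g. "GET"
		HTTPUserAgent(req.UserAgent()), // value of the User-Agent header
		HTTPStatusCode(status),         // e.g. 200
	}
	if req.ContentLength >= 0 { // -1 means the length is unknown
		attrs = append(attrs, HTTPRequestContentLength(int(req.ContentLength)))
	}
	return attrs
}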
+func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). 
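// Illustrative sketch, not part of the vendored file: building the HTTP client
// attributes above while honoring the note that `http.url` MUST NOT carry
// credentials. The helper name is hypothetical and a standard-library
// "net/url" import is assumed.
func exampleClientHTTPAttrs(u *url.URL, resendCount int) []attribute.KeyValue {
	sanitized := *u
	sanitized.User = nil // drop any username:password before recording the URL
	attrs := []attribute.KeyValue{HTTPURL(sanitized.String())}
	if resendCount > 0 { // only set when the request was actually resent
		attrs = append(attrs, HTTPResendCount(resendCount))
	}
	return attrs
}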
+func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// Semantic Convention for HTTP Server +const ( + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the URI scheme identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the full request target as passed in + // a HTTP request line or equivalent. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '/path/12314/?q=ddds' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route (path template in + // the format used by the respective server framework). See note below + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's available) + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: 'http.route' MUST NOT be populated when this is not supported by + // the HTTP server framework as the route attribute should have + // low-cardinality and the URI path can NOT substitute it. + HTTPRouteKey = attribute.Key("http.route") + + // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" + // semantic conventions. It represents the IP address of the original + // client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.sock.peer.addr`, which + // would + // identify the network-level peer, which may be a proxy. + // + // This attribute should be set when a source of information different + // from the one used for `net.sock.peer.addr`, is available even if that + // other + // source just confirms the same value as `net.sock.peer.addr`. + // Rationale: For `net.sock.peer.addr`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.sock.peer.addr` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the URI scheme identifying the used +// protocol. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the full request target as passed in a +// HTTP request line or equivalent. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route (path template in the +// format used by the respective server framework). 
See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
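// Illustrative sketch, not part of the vendored file: common attributes for a
// DynamoDB Query span built from the constructors defined in this section. The
// helper name and values are hypothetical.
func exampleDynamoDBQueryAttrs(table, index string, limit int, consistent bool) []attribute.KeyValue {
	return []attribute.KeyValue{
		AWSDynamoDBTableNames(table),          // keys of the RequestItems object field
		AWSDynamoDBIndexName(index),           // IndexName request parameter
		AWSDynamoDBLimit(limit),               // Limit request parameter
		AWSDynamoDBConsistentRead(consistent), // ConsistentRead request parameter
		AWSDynamoDBSelect("ALL_ATTRIBUTES"),   // Select request parameter
	}
}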
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It
+	// represents the JSON-serialized value of each item of the
+	// `GlobalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+	// "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+	// "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+	// "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+	// AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `LocalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "IndexARN": "string", "IndexName": "string",
+	// "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+	// AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+	// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+	// the value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+	// AWSDynamoDBTableCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_count" semantic conventions. It represents the
+	// number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+	return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
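// Illustrative sketch, not part of the vendored file: recording the Scan
// request/response attributes above on an existing span. The helper name is
// hypothetical, and an additional "go.opentelemetry.io/otel/trace" import is
// assumed for trace.Span.
func exampleRecordScan(span trace.Span, segment, totalSegments, count, scannedCount int) {
	span.SetAttributes(
		AWSDynamoDBSegment(segment),             // Segment request parameter
		AWSDynamoDBTotalSegments(totalSegments), // TotalSegments request parameter
		AWSDynamoDBCount(count),                 // Count response parameter
		AWSDynamoDBScannedCount(scannedCount),   // ScannedCount response parameter
	)
}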
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+	return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+	// AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+	// the "aws.dynamodb.attribute_definitions" semantic conventions. It
+	// represents the JSON-serialized value of each item in the
+	// `AttributeDefinitions` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+	// AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+	// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+	// conventions. It represents the JSON-serialized value of each item in
+	// the `GlobalSecondaryIndexUpdates` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+	// GraphqlOperationNameKey is the attribute Key conforming to the
+	// "graphql.operation.name" semantic conventions. It represents the name of
+	// the operation being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'findBookByID'
+	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+	// GraphqlOperationTypeKey is the attribute Key conforming to the
+	// "graphql.operation.type" semantic conventions. It represents the type of
+	// the operation being executed.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'query', 'mutation', 'subscription'
+	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+	// GraphqlDocumentKey is the attribute Key conforming to the
+	// "graphql.document" semantic conventions. It represents the GraphQL
+	// document being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+	// Note: The value may be sanitized to exclude sensitive information.
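// Illustrative sketch, not part of the vendored file: attributes for a GraphQL
// execution span using the keys and constructors in this section. The helper
// name and values are hypothetical.
func exampleGraphQLAttrs(operationName, document string) []attribute.KeyValue {
	return []attribute.KeyValue{
		GraphqlOperationName(operationName), // e.g. "findBookByID"
		GraphqlOperationTypeQuery,           // enum member for a query operation
		GraphqlDocument(document),           // may be sanitized to drop sensitive values
	}
}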
+ GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. 
It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationKindKey is the attribute Key conforming to the + // "messaging.destination.kind" semantic conventions. It represents the + // kind of message destination + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceKindKey is the attribute Key conforming to the + // "messaging.source.kind" semantic conventions. It represents the kind of + // message source + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingSourceKindKey = attribute.Key("messaging.source.kind") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. 
Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +var ( + // A message received from a queue + MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") + // A message received from a topic + MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. +func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. 
+ MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) 
+ // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// Attributes for Apache Kafka +const ( + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaClientIDKey is the attribute Key conforming to the + // "messaging.kafka.client_id" semantic conventions. It represents the + // client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the + // "messaging.kafka.source.partition" semantic conventions. It represents + // the partition the message is received from. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions.
It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// Attributes for Apache RocketMQ +const ( + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. 
It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqClientIDKey is the attribute Key conforming to the + // "messaging.rocketmq.client_id" semantic conventions. It represents the + // unique identifier for each client. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delay time level is not specified.) + // Stability: stable + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delivery timestamp is not specified.) + // Stability: stable + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the message group; it is essential for FIFO messages. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) + // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id.
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqClientID returns an attribute KeyValue conforming to the +// "messaging.rocketmq.client_id" semantic conventions. It represents the +// unique identifier for each client. +func MessagingRocketmqClientID(val string) attribute.KeyValue { + return MessagingRocketmqClientIDKey.String(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the message group; it is essential for FIFO messages.
Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. 
It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) 
+ // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index cb3efbb9ad8..3aadc66cf7a 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -268,6 +268,7 @@ func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { c.stackTrace = bool(o) return c } + func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { c.stackTrace = bool(o) return c diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index ab0346f9664..440f3d7565a 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -62,5 +62,69 @@ a default. defer span.End() // ... } + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. It requires them to choose a default behavior for +unimplemented interface methods. 
There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/trace/embedded] in their implementation. For +example, + + import "go.opentelemetry.io/otel/trace/embedded" + + type TracerProvider struct { + embedded.TracerProvider + // ... + } + +If an author wants the default behavior of their implementations to panic, they +can embed the API interface directly. + + import "go.opentelemetry.io/otel/trace" + + type TracerProvider struct { + trace.TracerProvider + // ... + } + +This option is not recommended. It will lead to publishing packages that +contain runtime panics when users update to newer versions of +[go.opentelemetry.io/otel/trace], which may be done with a transitive +dependency. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author. For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/trace/noop]: + + import "go.opentelemetry.io/otel/trace/noop" + + type TracerProvider struct { + noop.TracerProvider + // ... + } + +It is strongly recommended that authors only embed +[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. +That implementation is the only one OpenTelemetry authors can guarantee will +fully implement all the API interfaces when a user updates their API. */ package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go new file mode 100644 index 00000000000..898db5a7546 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// trace API]. +// +// Implementers of the [OpenTelemetry trace API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry trace API] is +// extended (which is something that can happen without a major version bump of +// the API package). +// +// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace +package embedded // import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider is embedded in +// [go.opentelemetry.io/otel/trace.TracerProvider].
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type TracerProvider interface{ tracerProvider() } + +// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Tracer interface{ tracer() } + +// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Span interface{ span() } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index 7cf6c7f3ef9..c125491caeb 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -19,16 +19,20 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" ) // NewNoopTracerProvider returns an implementation of TracerProvider that // performs no operations. The Tracer and Spans created from the returned // TracerProvider also perform no operations. +// +// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] +// instead. func NewNoopTracerProvider() TracerProvider { return noopTracerProvider{} } -type noopTracerProvider struct{} +type noopTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = noopTracerProvider{} @@ -38,7 +42,7 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { } // noopTracer is an implementation of Tracer that performs no operations. -type noopTracer struct{} +type noopTracer struct{ embedded.Tracer } var _ Tracer = noopTracer{} @@ -54,7 +58,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption } // noopSpan is an implementation of Span that performs no operations. -type noopSpan struct{} +type noopSpan struct{ embedded.Span } var _ Span = noopSpan{} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 4aa94f79f46..26a4b2260ec 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -22,6 +22,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -48,8 +49,10 @@ func (e errorConst) Error() string { // nolint:revive // revive complains about stutter of `trace.TraceID`. 
type TraceID [16]byte -var nilTraceID TraceID -var _ json.Marshaler = nilTraceID +var ( + nilTraceID TraceID + _ json.Marshaler = nilTraceID +) // IsValid checks whether the trace TraceID is valid. A valid trace ID does // not consist of zeros only. @@ -71,8 +74,10 @@ func (t TraceID) String() string { // SpanID is a unique identity of a span in a trace. type SpanID [8]byte -var nilSpanID SpanID -var _ json.Marshaler = nilSpanID +var ( + nilSpanID SpanID + _ json.Marshaler = nilSpanID +) // IsValid checks whether the SpanID is valid. A valid SpanID does not consist // of zeros only. @@ -338,8 +343,15 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { // create a Span and it is then up to the operation the Span represents to // properly end the Span when the operation itself ends. // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + // End completes the Span. The Span is considered complete and ready to be // delivered through the rest of the telemetry pipeline after this method // is called. Therefore, updates to the Span are not allowed after this @@ -486,8 +498,15 @@ func (sk SpanKind) String() string { // Tracer is the creator of Spans. // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + // Start creates a span and a context.Context containing the newly-created span. // // If the context.Context provided in `ctx` contains a Span then the newly-created @@ -518,8 +537,15 @@ type Tracer interface { // at runtime from its users or it can simply use the globally registered one // (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + // Tracer returns a unique Tracer scoped to be used by instrumentation code // to trace computational workflows. The scope and identity of that // instrumentation code is uniquely defined by the name and options passed. 
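For context on how the pieces of this patch fit together: the semconv constructors earlier in the diff all return attribute.KeyValue values, and the trace API consumes them (trace.Span exposes a SetAttributes(...attribute.KeyValue) method, not shown in these hunks). A minimal usage sketch follows; it is not part of the patch, and the package name, tracer/span names, attribute values, and the versioned semconv import path are illustrative assumptions, since this diff does not show the semconv package clause. Only otel.Tracer, Span.SetAttributes, and the helper constructors defined above are taken from the source.

    package example

    import (
        "context"

        "go.opentelemetry.io/otel"
        semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version; match the semconv package this vendored file belongs to
    )

    // publishOrder starts a span and attaches messaging attributes using the
    // constructors from the semconv hunk above; all values are placeholders.
    func publishOrder(ctx context.Context) {
        tracer := otel.Tracer("example/publisher")
        ctx, span := tracer.Start(ctx, "orders publish")
        defer span.End()

        span.SetAttributes(
            semconv.MessagingSystem("kafka"),
            semconv.MessagingDestinationName("orders"),
            semconv.MessagingOperationPublish,
            semconv.MessagingKafkaDestinationPartition(2),
        )

        _ = ctx // pass the returned ctx on to the actual produce call
    }

The RPC and GraphQL constructors earlier in the hunk (for example RPCSystemGRPC, RPCService, and GraphqlOperationName) are attached to spans the same way.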
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index ca68a82e5f7..d1e47ca2faa 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -28,9 +28,9 @@ const ( // based on the W3C Trace Context specification, see // https://www.w3.org/TR/trace-context-1/#tracestate-header - noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` - withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` + noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*` + withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]` errInvalidKey errorConst = "invalid tracestate key" errInvalidValue errorConst = "invalid tracestate value" @@ -40,9 +40,10 @@ const ( ) var ( - keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) - valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) - memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) + noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`) + withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`) + valueRe = regexp.MustCompile(`^` + valueFormat + `$`) + memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) ) type member struct { @@ -51,10 +52,19 @@ type member struct { } func newMember(key, value string) (member, error) { - if !keyRe.MatchString(key) { + if len(key) > 256 { return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) } - if !valueRe.MatchString(value) { + if !noTenantKeyRe.MatchString(key) { + if !withTenantKeyRe.MatchString(key) { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } + atIndex := strings.LastIndex(key, "@") + if atIndex > 241 || len(key)-1-atIndex > 14 { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } + } + if len(value) > 256 || !valueRe.MatchString(value) { return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) } return member{Key: key, Value: value}, nil @@ -62,14 +72,14 @@ func newMember(key, value string) (member, error) { func parseMember(m string) (member, error) { matches := memberRe.FindStringSubmatch(m) - if len(matches) != 5 { + if len(matches) != 3 { return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) } - - return member{ - Key: matches[1], - Value: matches[4], - }, nil + result, e := newMember(matches[1], matches[2]) + if e != nil { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + return result, nil } // String encodes member into a string compliant with the W3C Trace Context diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index ad64e199672..e2f743585d1 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.19.0" + return "1.21.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 7d212769240..3c153c9d6fc 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,13 +14,12 @@ module-sets: stable-v1: - version: v1.19.0 + version: v1.21.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/fib - go.opentelemetry.io/otel/example/namedtracer - go.opentelemetry.io/otel/example/otel-collector - go.opentelemetry.io/otel/example/passthrough @@ -35,14 +34,12 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.42.0 + version: v0.44.0 modules: - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus - - go.opentelemetry.io/otel/example/view - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/prometheus diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 84f9302dcfa..3356fa97b8f 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -55,6 +55,8 @@ type DialSettings struct { EnableDirectPathXds bool EnableNewAuthLibrary bool AllowNonDefaultServiceAccount bool + UniverseDomain string + DefaultUniverseDomain string // Google API system parameters. For more information please read: // https://cloud.google.com/apis/docs/system-parameters diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index c95bd7144f0..104a911327f 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.152.0" +const Version = "0.154.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index b2b249eec68..3fdee095c1b 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -126,6 +126,22 @@ func (w withDefaultScopes) Apply(o *internal.DialSettings) { copy(o.DefaultScopes, w) } +// WithDefaultUniverseDomain returns a ClientOption that sets the default universe domain. +// +// It should only be used internally by generated clients. +// +// This is similar to the public WithUniverse, but allows us to determine whether the user has +// overridden the default universe. +func WithDefaultUniverseDomain(ud string) option.ClientOption { + return withDefaultUniverseDomain(ud) +} + +type withDefaultUniverseDomain string + +func (w withDefaultUniverseDomain) Apply(o *internal.DialSettings) { + o.DefaultUniverseDomain = string(w) +} + // EnableJwtWithScope returns a ClientOption that specifies if scope can be used // with self-signed JWT. 
func EnableJwtWithScope() option.ClientOption { diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index b2085a1949a..c882c1eb482 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -343,3 +343,16 @@ func (w *withCreds) Apply(o *internal.DialSettings) { func WithCredentials(creds *google.Credentials) ClientOption { return (*withCreds)(creds) } + +// WithUniverseDomain returns a ClientOption that sets the universe domain. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func WithUniverseDomain(ud string) ClientOption { + return withUniverseDomain(ud) +} + +type withUniverseDomain string + +func (w withUniverseDomain) Apply(o *internal.DialSettings) { + o.UniverseDomain = string(w) +} diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 87a22f75863..34f9ba8bdb7 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -18,6 +18,7 @@ import ( "cloud.google.com/go/compute/metadata" "go.opencensus.io/plugin/ocgrpc" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "golang.org/x/oauth2" "golang.org/x/time/rate" "google.golang.org/api/internal" @@ -43,6 +44,9 @@ var timeoutDialerOption grpc.DialOption // Log rate limiter var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second} +// Assign to var for unit test replacement +var dialContext = grpc.DialContext + // Dial returns a GRPC connection for use communicating with a Google cloud // service, configured with the given ClientOptions. func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { @@ -195,12 +199,13 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. grpcOpts = addOCStatsHandler(grpcOpts, o) + grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, o) grpcOpts = append(grpcOpts, o.GRPCDialOpts...) if o.UserAgent != "" { grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) } - return grpc.DialContext(ctx, endpoint, grpcOpts...) + return dialContext(ctx, endpoint, grpcOpts...) } func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { @@ -210,6 +215,13 @@ func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) } +func addOpenTelemetryStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { + if settings.TelemetryDisabled { + return opts + } + return append(opts, grpc.WithStatsHandler(otelgrpc.NewClientHandler())) +} + // grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource. 
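A minimal sketch, not part of this patch, of the two client-facing changes above: the experimental option.WithUniverseDomain setting now carried in DialSettings, and transport/grpc.Dial, which (unless telemetry is disabled) now also attaches the otelgrpc stats handler. The endpoint and domain values below are placeholders:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	gtransport "google.golang.org/api/transport/grpc"
)

func main() {
	ctx := context.Background()
	// WithUniverseDomain is experimental; the endpoint here is illustrative only.
	conn, err := gtransport.Dial(ctx,
		option.WithEndpoint("storage.googleapis.com:443"),
		option.WithoutAuthentication(),
		option.WithUniverseDomain("googleapis.com"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```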
type grpcTokenSource struct { oauth.TokenSource diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index a07362ffdbd..7e322a17c68 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -16,6 +16,7 @@ import ( "time" "go.opencensus.io/plugin/ochttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" @@ -69,6 +70,9 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna requestReason: settings.RequestReason, } var trans http.RoundTripper = paramTransport + // Give OpenTelemetry precedence over OpenCensus in case user configuration + // causes both to write the same header (`X-Cloud-Trace-Context`). + trans = addOpenTelemetryTransport(trans, settings) trans = addOCTransport(trans, settings) switch { case settings.NoAuth: @@ -203,6 +207,13 @@ func fallbackBaseTransport() *http.Transport { } } +func addOpenTelemetryTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper { + if settings.TelemetryDisabled { + return trans + } + return otelhttp.NewTransport(trans) +} + func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper { if settings.TelemetryDisabled { return trans diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc b/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc new file mode 100644 index 00000000000..730e569b069 --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc @@ -0,0 +1 @@ +'|Ê&{tÄU|gGê(ìCy=+¨œòcû:u:/pœ#~žü["±4¤!­nÙAªDK<ŠufÿhÅa¿Â:ºü¸¡´B/£Ø¤¹¤ò_hÎÛSãT*wÌx¼¯¹-ç|àÀÓƒÑÄäóÌ㣗A$$â6£ÁâG)8nÏpûÆË¡3ÌšœoïÏvŽB–3¿­]xÝ“Ó2l§G•|qRÞ¯ ö2 5R–Ó×Ç$´ñ½Yè¡ÞÝ™l‘Ë«yAI"ÛŒ˜®íû¹¼kÄ|Kåþ[9ÆâÒå=°úÿŸñ|@S•3 ó#æx?¾V„,¾‚SÆÝõœwPíogÒ6&V6 ©D.dBŠ 7 \ No newline at end of file diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/.gitignore b/vendor/gopkg.in/go-jose/go-jose.v2/.gitignore new file mode 100644 index 00000000000..95a851586a5 --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/.gitignore @@ -0,0 +1,8 @@ +*~ +.*.swp +*.out +*.test +*.pem +*.cov +jose-util/jose-util +jose-util.t.err \ No newline at end of file diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml b/vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml new file mode 100644 index 00000000000..391b99a4014 --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml @@ -0,0 +1,45 @@ +language: go + +sudo: false + +matrix: + fast_finish: true + allow_failures: + - go: tip + +go: +- '1.14.x' +- '1.15.x' +- tip + +go_import_path: gopkg.in/square/go-jose.v2 + +before_script: +- export PATH=$HOME/.local/bin:$PATH + +before_install: +# Install encrypted gitcookies to get around bandwidth-limits +# that is causing Travis-CI builds to fail. For more info, see +# https://github.com/golang/go/issues/12933 +- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true +- bash .gitcookies.sh || true +- go get github.com/wadey/gocovmerge +- go get github.com/mattn/goveralls +- go get github.com/stretchr/testify/assert +- go get github.com/stretchr/testify/require +- go get github.com/google/go-cmp/cmp +- go get golang.org/x/tools/cmd/cover || true +- go get code.google.com/p/go.tools/cmd/cover || true +- pip install cram --user + +script: +- go test . 
-v -covermode=count -coverprofile=profile.cov +- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov +- go test ./jwt -v -covermode=count -coverprofile=jwt/profile.cov +- go test ./json -v # no coverage for forked encoding/json package +- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util +- cd .. + +after_success: +- gocovmerge *.cov */*.cov > merged.coverprofile +- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md b/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md new file mode 100644 index 00000000000..61b183651c0 --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md @@ -0,0 +1,14 @@ +# Contributing + +If you would like to contribute code to go-jose you can do so through GitHub by +forking the repository and sending a pull request. + +When submitting code, please make every effort to follow existing conventions +and style in order to keep the code as readable as possible. Please also make +sure all tests pass by running `go test`, and format your code with `go fmt`. +We also recommend using `golint` and `errcheck`. + +Before your code can be accepted into the project you must also sign the +[Individual Contributor License Agreement][1]. + + [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE b/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/README.md b/vendor/gopkg.in/go-jose/go-jose.v2/README.md new file mode 100644 index 00000000000..46b02d61d8a --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/README.md @@ -0,0 +1,118 @@ +# Go JOSE + +[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v1) +[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) +[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) +[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=v2)](https://travis-ci.org/go-jose/go-jose) +[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=v2)](https://coveralls.io/r/go-jose/go-jose) + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. 
This includes support for JSON Web Encryption, +JSON Web Signature, and JSON Web Token standards. + +**Disclaimer**: This library contains encryption software that is subject to +the U.S. Export Administration Regulations. You may not export, re-export, +transfer or download this code or any part of it in violation of any United +States law, directive or regulation. In particular this software may not be +exported or re-exported in any form or on any media to Iran, North Sudan, +Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any +US maintained blocked list. + +## Overview + +The implementation follows the +[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), +[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and +[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519). +Tables of supported algorithms are shown below. The library supports both +the compact and full serialization formats, and has optional support for +multiple recipients. It also comes with a small command-line utility +([`jose-util`](https://github.com/go-jose/go-jose/tree/v2/jose-util)) +for dealing with JOSE messages in a shell. + +**Note**: We use a forked version of the `encoding/json` package from the Go +standard library which uses case-sensitive matching for member names (instead +of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). +This is to avoid differences in interpretation of messages between go-jose and +libraries in other languages. + +### Versions + +We use [gopkg.in](https://gopkg.in) for versioning. + +[Version 2](https://gopkg.in/go-jose/go-jose.v2) +([branch](https://github.com/go-jose/go-jose/tree/v2), +[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current version: + + import "gopkg.in/go-jose/go-jose.v2" + +The old `v1` branch ([go-jose.v1](https://gopkg.in/go-jose/go-jose.v1)) will +still receive backported bug fixes and security fixes, but otherwise +development is frozen. All new feature development takes place on the `v2` +branch. Version 2 also contains additional sub-packages such as the +[jwt](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) implementation +contributed by [@shaxbee](https://github.com/shaxbee). + +### Supported algorithms + +See below for a table of supported algorithms. Algorithm identifiers match +the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) +standard where possible. The Godoc reference has a list of constants. + + Key encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSA-PKCS#1v1.5 | RSA1_5 + RSA-OAEP | RSA-OAEP, RSA-OAEP-256 + AES key wrap | A128KW, A192KW, A256KW + AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW + ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW + ECDH-ES (direct) | ECDH-ES1 + Direct encryption | dir1 + +1. Not supported in multi-recipient mode + + Signing / MAC | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 + RSASSA-PSS | PS256, PS384, PS512 + HMAC | HS256, HS384, HS512 + ECDSA | ES256, ES384, ES512 + Ed25519 | EdDSA2 + +2. 
Only available in version 2 of the package + + Content encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 + AES-GCM | A128GCM, A192GCM, A256GCM + + Compression | Algorithm identifiers(s) + :------------------------- | ------------------------------- + DEFLATE (RFC 1951) | DEF + +### Supported key types + +See below for a table of supported key types. These are understood by the +library, and can be passed to corresponding functions such as `NewEncrypter` or +`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which +allows attaching a key id. + + Algorithm(s) | Corresponding types + :------------------------- | ------------------------------- + RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) + ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) + EdDSA1 | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey) + AES, HMAC | []byte + +1. Only available in version 2 of the package + +## Examples + +[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v1) +[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) + +Examples can be found in the Godoc +reference for this package. The +[`jose-util`](https://github.com/go-jose/go-jose/tree/v2/jose-util) +subdirectory also contains a small command-line utility which might be useful +as an example. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go b/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go new file mode 100644 index 00000000000..3ca79cc2682 --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go @@ -0,0 +1,592 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package jose + +import ( + "crypto" + "crypto/aes" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "errors" + "fmt" + "math/big" + + "golang.org/x/crypto/ed25519" + josecipher "gopkg.in/go-jose/go-jose.v2/cipher" + "gopkg.in/go-jose/go-jose.v2/json" +) + +// A generic RSA-based encrypter/verifier +type rsaEncrypterVerifier struct { + publicKey *rsa.PublicKey +} + +// A generic RSA-based decrypter/signer +type rsaDecrypterSigner struct { + privateKey *rsa.PrivateKey +} + +// A generic EC-based encrypter/verifier +type ecEncrypterVerifier struct { + publicKey *ecdsa.PublicKey +} + +type edEncrypterVerifier struct { + publicKey ed25519.PublicKey +} + +// A key generator for ECDH-ES +type ecKeyGenerator struct { + size int + algID string + publicKey *ecdsa.PublicKey +} + +// A generic EC-based decrypter/signer +type ecDecrypterSigner struct { + privateKey *ecdsa.PrivateKey +} + +type edDecrypterSigner struct { + privateKey ed25519.PrivateKey +} + +// newRSARecipient creates recipientKeyInfo based on the given key. +func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case RSA1_5, RSA_OAEP, RSA_OAEP_256: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &rsaEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newRSASigner creates a recipientSigInfo based on the given key. +func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case RS256, RS384, RS512, PS256, PS384, PS512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &rsaDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { + if sigAlg != EdDSA { + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &edDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// newECDHRecipient creates recipientKeyInfo based on the given key. +func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &ecEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newECDSASigner creates a recipientSigInfo based on the given key. 
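These recipient/signer constructors are internal plumbing behind the public jose.NewSigner and jose.NewEncrypter entry points. A small round-trip sketch, not part of the vendored code, using the ES256 path that newECDSASigner (below) ultimately backs:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jose "gopkg.in/go-jose/go-jose.v2"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	jws, err := signer.Sign([]byte("payload"))
	if err != nil {
		panic(err)
	}
	compact, err := jws.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseSigned(compact)
	if err != nil {
		panic(err)
	}
	out, err := parsed.Verify(&key.PublicKey)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // payload
}
```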
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case ES256, ES384, ES512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &ecDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// Encrypt the given payload and update the object. +func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + encryptedKey, err := ctx.encrypt(cek, alg) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: encryptedKey, + header: &rawHeader{}, + }, nil +} + +// Encrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). +func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { + switch alg { + case RSA1_5: + return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek) + case RSA_OAEP: + return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{}) + case RSA_OAEP_256: + return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Decrypt the given payload and return the content encryption key. +func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator) +} + +// Decrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). +func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { + // Note: The random reader on decrypt operations is only used for blinding, + // so stubbing is meanlingless (hence the direct use of rand.Reader). + switch alg { + case RSA1_5: + defer func() { + // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload + // because of an index out of bounds error, which we want to ignore. + // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() + // only exists for preventing crashes with unpatched versions. + // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k + // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 + _ = recover() + }() + + // Perform some input validation. + keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 + if keyBytes != len(jek) { + // Input size is incorrect, the encrypted payload should always match + // the size of the public modulus (e.g. using a 2048 bit key will + // produce 256 bytes of output). Reject this since it's invalid input. + return nil, ErrCryptoFailure + } + + cek, _, err := generator.genKey() + if err != nil { + return nil, ErrCryptoFailure + } + + // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to + // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing + // the Million Message Attack on Cryptographic Message Syntax". We are + // therefore deliberately ignoring errors here. 
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) + + return cek, nil + case RSA_OAEP: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + case RSA_OAEP_256: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Sign the given payload +func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return Signature{}, ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + var out []byte + var err error + + switch alg { + case RS256, RS384, RS512: + out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) + case PS256, PS384, PS512: + out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }) + } + + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + switch alg { + case RS256, RS384, RS512: + return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) + case PS256, PS384, PS512: + return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) + } + + return ErrUnsupportedAlgorithm +} + +// Encrypt the given payload and update the object. +func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + switch alg { + case ECDH_ES: + // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
+ return recipientInfo{ + header: &rawHeader{}, + }, nil + case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientInfo{}, ErrUnsupportedAlgorithm + } + + generator := ecKeyGenerator{ + algID: string(alg), + publicKey: ctx.publicKey, + } + + switch alg { + case ECDH_ES_A128KW: + generator.size = 16 + case ECDH_ES_A192KW: + generator.size = 24 + case ECDH_ES_A256KW: + generator.size = 32 + } + + kek, header, err := generator.genKey() + if err != nil { + return recipientInfo{}, err + } + + block, err := aes.NewCipher(kek) + if err != nil { + return recipientInfo{}, err + } + + jek, err := josecipher.KeyWrap(block, cek) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: jek, + header: &header, + }, nil +} + +// Get key size for EC key generator +func (ctx ecKeyGenerator) keySize() int { + return ctx.size +} + +// Get a content encryption key for ECDH-ES +func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { + priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) + if err != nil { + return nil, rawHeader{}, err + } + + out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) + + b, err := json.Marshal(&JSONWebKey{ + Key: &priv.PublicKey, + }) + if err != nil { + return nil, nil, err + } + + headers := rawHeader{ + headerEPK: makeRawMessage(b), + } + + return out, headers, nil +} + +// Decrypt the given payload and return the content encryption key. +func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + epk, err := headers.getEPK() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + if epk == nil { + return nil, errors.New("go-jose/go-jose: missing epk header") + } + + publicKey, ok := epk.Key.(*ecdsa.PublicKey) + if publicKey == nil || !ok { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + + if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return nil, errors.New("go-jose/go-jose: invalid public key in epk header") + } + + apuData, err := headers.getAPU() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apu header") + } + apvData, err := headers.getAPV() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apv header") + } + + deriveKey := func(algID string, size int) []byte { + return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) + } + + var keySize int + + algorithm := headers.getAlgorithm() + switch algorithm { + case ECDH_ES: + // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil + case ECDH_ES_A128KW: + keySize = 16 + case ECDH_ES_A192KW: + keySize = 24 + case ECDH_ES_A256KW: + keySize = 32 + default: + return nil, ErrUnsupportedAlgorithm + } + + key := deriveKey(string(algorithm), keySize) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + return josecipher.KeyUnwrap(block, recipient.encryptedKey) +} + +func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + if alg != EdDSA { + return Signature{}, ErrUnsupportedAlgorithm + } + + sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: sig, + protected: &rawHeader{}, + }, nil +} + +func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + if alg != EdDSA { + return ErrUnsupportedAlgorithm + } + ok := ed25519.Verify(ctx.publicKey, payload, signature) + if !ok { + return errors.New("go-jose/go-jose: ed25519 signature failed to verify") + } + return nil +} + +// Sign the given payload +func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var expectedBitSize int + var hash crypto.Hash + + switch alg { + case ES256: + expectedBitSize = 256 + hash = crypto.SHA256 + case ES384: + expectedBitSize = 384 + hash = crypto.SHA384 + case ES512: + expectedBitSize = 521 + hash = crypto.SHA512 + } + + curveBits := ctx.privateKey.Curve.Params().BitSize + if expectedBitSize != curveBits { + return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) + if err != nil { + return Signature{}, err + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes++ + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
+ + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var keySize int + var hash crypto.Hash + + switch alg { + case ES256: + keySize = 32 + hash = crypto.SHA256 + case ES384: + keySize = 48 + hash = crypto.SHA384 + case ES512: + keySize = 66 + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + if len(signature) != 2*keySize { + return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r := big.NewInt(0).SetBytes(signature[:keySize]) + s := big.NewInt(0).SetBytes(signature[keySize:]) + + match := ecdsa.Verify(ctx.publicKey, hashed, r, s) + if !match { + return errors.New("go-jose/go-jose: ecdsa signature failed to verify") + } + + return nil +} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go new file mode 100644 index 00000000000..87065a5b96e --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go @@ -0,0 +1,196 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "errors" + "hash" +) + +const ( + nonceBytes = 16 +) + +// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. +func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { + keySize := len(key) / 2 + integrityKey := key[:keySize] + encryptionKey := key[keySize:] + + blockCipher, err := newBlockCipher(encryptionKey) + if err != nil { + return nil, err + } + + var hash func() hash.Hash + switch keySize { + case 16: + hash = sha256.New + case 24: + hash = sha512.New384 + case 32: + hash = sha512.New + } + + return &cbcAEAD{ + hash: hash, + blockCipher: blockCipher, + authtagBytes: keySize, + integrityKey: integrityKey, + }, nil +} + +// An AEAD based on CBC+HMAC +type cbcAEAD struct { + hash func() hash.Hash + authtagBytes int + integrityKey []byte + blockCipher cipher.Block +} + +func (ctx *cbcAEAD) NonceSize() int { + return nonceBytes +} + +func (ctx *cbcAEAD) Overhead() int { + // Maximum overhead is block size (for padding) plus auth tag length, where + // the length of the auth tag is equivalent to the key size. + return ctx.blockCipher.BlockSize() + ctx.authtagBytes +} + +// Seal encrypts and authenticates the plaintext. +func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { + // Output buffer -- must take care not to mangle plaintext input. 
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] + copy(ciphertext, plaintext) + ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) + + cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) + + cbc.CryptBlocks(ciphertext, ciphertext) + authtag := ctx.computeAuthTag(data, nonce, ciphertext) + + ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) + copy(out, ciphertext) + copy(out[len(ciphertext):], authtag) + + return ret +} + +// Open decrypts and authenticates the ciphertext. +func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { + if len(ciphertext) < ctx.authtagBytes { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)") + } + + offset := len(ciphertext) - ctx.authtagBytes + expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) + match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) + if match != 1 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)") + } + + cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) + + // Make copy of ciphertext buffer, don't want to modify in place + buffer := append([]byte{}, []byte(ciphertext[:offset])...) + + if len(buffer)%ctx.blockCipher.BlockSize() > 0 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") + } + + cbc.CryptBlocks(buffer, buffer) + + // Remove padding + plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) + if err != nil { + return nil, err + } + + ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) + copy(out, plaintext) + + return ret, nil +} + +// Compute an authentication tag +func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { + buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) + n := 0 + n += copy(buffer, aad) + n += copy(buffer[n:], nonce) + n += copy(buffer[n:], ciphertext) + binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) + + // According to documentation, Write() on hash.Hash never fails. + hmac := hmac.New(ctx.hash, ctx.integrityKey) + _, _ = hmac.Write(buffer) + + return hmac.Sum(nil)[:ctx.authtagBytes] +} + +// resize ensures that the given slice has a capacity of at least n bytes. +// If the capacity of the slice is less than n, a new slice is allocated +// and the existing data will be copied. 
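A short sketch, not part of the vendored code, exercising the cbcAEAD Seal/Open pair above through the exported NewCBCHMAC constructor: a 32-byte key gives A128CBC-HS256 (a 16-byte HMAC-SHA-256 integrity key plus a 16-byte AES-128 key), and the nonce must be one 16-byte block:

```go
package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"

	josecipher "gopkg.in/go-jose/go-jose.v2/cipher"
)

func main() {
	// Illustrative random key and nonce; real callers derive these per JWE.
	key := make([]byte, 32)
	nonce := make([]byte, 16)
	_, _ = rand.Read(key)
	_, _ = rand.Read(nonce)

	aead, err := josecipher.NewCBCHMAC(key, aes.NewCipher)
	if err != nil {
		panic(err)
	}

	ct := aead.Seal(nil, nonce, []byte("hello"), []byte("aad"))
	pt, err := aead.Open(nil, nonce, ct, []byte("aad"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(pt)) // hello
}
```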
+func resize(in []byte, n uint64) (head, tail []byte) { + if uint64(cap(in)) >= n { + head = in[:n] + } else { + head = make([]byte, n) + copy(head, in) + } + + tail = head[len(in):] + return +} + +// Apply padding +func padBuffer(buffer []byte, blockSize int) []byte { + missing := blockSize - (len(buffer) % blockSize) + ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) + padding := bytes.Repeat([]byte{byte(missing)}, missing) + copy(out, padding) + return ret +} + +// Remove padding +func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { + if len(buffer)%blockSize != 0 { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + last := buffer[len(buffer)-1] + count := int(last) + + if count == 0 || count > blockSize || count > len(buffer) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + padding := bytes.Repeat([]byte{last}, count) + if !bytes.HasSuffix(buffer, padding) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + return buffer[:len(buffer)-count], nil +} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go new file mode 100644 index 00000000000..f62c3bdba5d --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go @@ -0,0 +1,75 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto" + "encoding/binary" + "hash" + "io" +) + +type concatKDF struct { + z, info []byte + i uint32 + cache []byte + hasher hash.Hash +} + +// NewConcatKDF builds a KDF reader based on the given inputs. +func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { + buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) + n := 0 + n += copy(buffer, algID) + n += copy(buffer[n:], ptyUInfo) + n += copy(buffer[n:], ptyVInfo) + n += copy(buffer[n:], supPubInfo) + copy(buffer[n:], supPrivInfo) + + hasher := hash.New() + + return &concatKDF{ + z: z, + info: buffer, + hasher: hasher, + cache: []byte{}, + i: 1, + } +} + +func (ctx *concatKDF) Read(out []byte) (int, error) { + copied := copy(out, ctx.cache) + ctx.cache = ctx.cache[copied:] + + for copied < len(out) { + ctx.hasher.Reset() + + // Write on a hash.Hash never fails + _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) + _, _ = ctx.hasher.Write(ctx.z) + _, _ = ctx.hasher.Write(ctx.info) + + hash := ctx.hasher.Sum(nil) + chunkCopied := copy(out[copied:], hash) + copied += chunkCopied + ctx.cache = hash[chunkCopied:] + + ctx.i++ + } + + return copied, nil +} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go new file mode 100644 index 00000000000..093c646740b --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go @@ -0,0 +1,86 @@ +/*- + * Copyright 2014 Square Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "encoding/binary" +) + +// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. +// It is an error to call this function with a private/public key that are not on the same +// curve. Callers must ensure that the keys are valid before calling this function. Output +// size may be at most 1<<16 bytes (64 KiB). +func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { + if size > 1<<16 { + panic("ECDH-ES output size too large, must be less than or equal to 1<<16") + } + + // algId, partyUInfo, partyVInfo inputs must be prefixed with the length + algID := lengthPrefixed([]byte(alg)) + ptyUInfo := lengthPrefixed(apuData) + ptyVInfo := lengthPrefixed(apvData) + + // suppPubInfo is the encoded length of the output size in bits + supPubInfo := make([]byte, 4) + binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) + + if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { + panic("public key not on same curve as private key") + } + + z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) + zBytes := z.Bytes() + + // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from + // the returned byte array. This can lead to a problem where zBytes will be + // shorter than expected which breaks the key derivation. Therefore we must pad + // to the full length of the expected coordinate here before calling the KDF. + octSize := dSize(priv.Curve) + if len(zBytes) != octSize { + zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...) + } + + reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) + key := make([]byte, size) + + // Read on the KDF will never fail + _, _ = reader.Read(key) + + return key +} + +// dSize returns the size in octets for a coordinate on a elliptic curve. +func dSize(curve elliptic.Curve) int { + order := curve.Params().P + bitLen := order.BitLen() + size := bitLen / 8 + if bitLen%8 != 0 { + size++ + } + return size +} + +func lengthPrefixed(data []byte) []byte { + out := make([]byte, len(data)+4) + binary.BigEndian.PutUint32(out, uint32(len(data))) + copy(out[4:], data) + return out +} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go new file mode 100644 index 00000000000..668358f981b --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go @@ -0,0 +1,109 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto/cipher" + "crypto/subtle" + "encoding/binary" + "errors" +) + +var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} + +// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. +func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { + if len(cek)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := len(cek) / 8 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], cek[i*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer, defaultIV) + + for t := 0; t < 6*n; t++ { + copy(buffer[8:], r[t%n]) + + block.Encrypt(buffer, buffer) + + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(r[t%n], buffer[8:]) + } + + out := make([]byte, (n+1)*8) + copy(out, buffer[:8]) + for i := range r { + copy(out[(i+1)*8:], r[i]) + } + + return out, nil +} + +// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. +func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { + if len(ciphertext)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := (len(ciphertext) / 8) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], ciphertext[(i+1)*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer[:8], ciphertext[:8]) + + for t := 6*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(buffer[8:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[8:]) + } + + if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { + return nil, errors.New("go-jose/go-jose: failed to unwrap key") + } + + out := make([]byte, n*8) + for i := range r { + copy(out[i*8:], r[i]) + } + + return out, nil +} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go b/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go new file mode 100644 index 00000000000..73aab0fabb1 --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go @@ -0,0 +1,542 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + "reflect" + + "gopkg.in/go-jose/go-jose.v2/json" +) + +// Encrypter represents an encrypter which produces an encrypted JWE object. +type Encrypter interface { + Encrypt(plaintext []byte) (*JSONWebEncryption, error) + EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) + Options() EncrypterOptions +} + +// A generic content cipher +type contentCipher interface { + keySize() int + encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) + decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) +} + +// A key generator (for generating/getting a CEK) +type keyGenerator interface { + keySize() int + genKey() ([]byte, rawHeader, error) +} + +// A generic key encrypter +type keyEncrypter interface { + encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key +} + +// A generic key decrypter +type keyDecrypter interface { + decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key +} + +// A generic encrypter based on the given key encrypter and content cipher. +type genericEncrypter struct { + contentAlg ContentEncryption + compressionAlg CompressionAlgorithm + cipher contentCipher + recipients []recipientKeyInfo + keyGenerator keyGenerator + extraHeaders map[HeaderKey]interface{} +} + +type recipientKeyInfo struct { + keyID string + keyAlg KeyAlgorithm + keyEncrypter keyEncrypter +} + +// EncrypterOptions represents options that can be set on new encrypters. +type EncrypterOptions struct { + Compression CompressionAlgorithm + + // Optional map of additional keys to be inserted into the protected header + // of a JWS object. Some specifications which make use of JWS like to insert + // additional values here. All values must be JSON-serializable. + ExtraHeaders map[HeaderKey]interface{} +} + +// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it +// if necessary. It returns itself and so can be used in a fluent style. +func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { + if eo.ExtraHeaders == nil { + eo.ExtraHeaders = map[HeaderKey]interface{}{} + } + eo.ExtraHeaders[k] = v + return eo +} + +// WithContentType adds a content type ("cty") header and returns the updated +// EncrypterOptions. +func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderContentType, contentType) +} + +// WithType adds a type ("typ") header and returns the updated EncrypterOptions. +func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderType, typ) +} + +// Recipient represents an algorithm/key to encrypt messages to. +// +// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used +// on the password-based encryption algorithms PBES2-HS256+A128KW, +// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe +// default of 100000 will be used for the count and a 128-bit random salt will +// be generated. 
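The Recipient and NewEncrypter definitions that follow are the public entry points for encryption. A minimal round-trip sketch, not part of the vendored code, using ECDH-ES in direct key-agreement mode with A128GCM content encryption:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jose "gopkg.in/go-jose/go-jose.v2"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// ECDH-ES (no key wrapping): the shared secret is used directly as the CEK.
	enc, err := jose.NewEncrypter(
		jose.A128GCM,
		jose.Recipient{Algorithm: jose.ECDH_ES, Key: &key.PublicKey},
		nil,
	)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("secret"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseEncrypted(compact)
	if err != nil {
		panic(err)
	}
	pt, err := parsed.Decrypt(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(pt)) // secret
}
```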
+type Recipient struct { + Algorithm KeyAlgorithm + Key interface{} + KeyID string + PBES2Count int + PBES2Salt []byte +} + +// NewEncrypter creates an appropriate encrypter based on the key type +func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: getContentCipher(enc), + } + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + if encrypter.cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + var keyID string + var rawKey interface{} + switch encryptionKey := rcpt.Key.(type) { + case JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case *JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case OpaqueKeyEncrypter: + keyID, rawKey = encryptionKey.KeyID(), encryptionKey + default: + rawKey = encryptionKey + } + + switch rcpt.Algorithm { + case DIRECT: + // Direct encryption mode must be treated differently + if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + return nil, ErrUnsupportedKeyType + } + if encrypter.cipher.keySize() != len(rawKey.([]byte)) { + return nil, ErrInvalidKeySize + } + encrypter.keyGenerator = staticKeyGenerator{ + key: rawKey.([]byte), + } + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + case ECDH_ES: + // ECDH-ES (w/o key wrapping) is similar to DIRECT mode + typeOf := reflect.TypeOf(rawKey) + if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = ecKeyGenerator{ + size: encrypter.cipher.keySize(), + algID: string(enc), + publicKey: rawKey.(*ecdsa.PublicKey), + } + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + default: + // Can just add a standard recipient + encrypter.keyGenerator = randomKeyGenerator{ + size: encrypter.cipher.keySize(), + } + err := encrypter.addRecipient(rcpt) + return encrypter, err + } +} + +// NewMultiEncrypter creates a multi-encrypter based on the given parameters +func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { + cipher := getContentCipher(enc) + + if cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + if rcpts == nil || len(rcpts) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") + } + + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: cipher, + keyGenerator: randomKeyGenerator{ + size: cipher.keySize(), + }, + } + + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + for _, recipient := range rcpts { + err := encrypter.addRecipient(recipient) + if err != nil { + return nil, err + } + } + + return encrypter, nil +} + +func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { + var recipientInfo recipientKeyInfo + + switch recipient.Algorithm { + case DIRECT, ECDH_ES: + return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) 
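For algorithms other than DIRECT and ECDH-ES, NewEncrypter generates a fresh random CEK per message and wraps it for the recipient. A hedged construction sketch for RSA-OAEP with AES-128-GCM content encryption; the key pair and KeyID are placeholders:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "gopkg.in/go-jose/go-jose.v2"
)

func main() {
	// Placeholder key pair; a real caller would use the recipient's public key.
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// RSA-OAEP wraps a randomly generated CEK; the payload is encrypted with A128GCM.
	enc, err := jose.NewEncrypter(
		jose.A128GCM,
		jose.Recipient{Algorithm: jose.RSA_OAEP, Key: &priv.PublicKey, KeyID: "example-kid"},
		nil,
	)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte(`{"hello":"world"}`))
	if err != nil {
		panic(err)
	}

	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(compact)
}
```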
+ } + + recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) + if recipient.KeyID != "" { + recipientInfo.keyID = recipient.KeyID + } + + switch recipient.Algorithm { + case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: + if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { + sr.p2c = recipient.PBES2Count + sr.p2s = recipient.PBES2Salt + } + } + + if err == nil { + ctx.recipients = append(ctx.recipients, recipientInfo) + } + return err +} + +func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { + switch encryptionKey := encryptionKey.(type) { + case *rsa.PublicKey: + return newRSARecipient(alg, encryptionKey) + case *ecdsa.PublicKey: + return newECDHRecipient(alg, encryptionKey) + case []byte: + return newSymmetricRecipient(alg, encryptionKey) + case string: + return newSymmetricRecipient(alg, []byte(encryptionKey)) + case *JSONWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + recipient.keyID = encryptionKey.KeyID + return recipient, err + } + if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { + return newOpaqueKeyEncrypter(alg, encrypter) + } + return recipientKeyInfo{}, ErrUnsupportedKeyType +} + +// newDecrypter creates an appropriate decrypter based on the key type +func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { + switch decryptionKey := decryptionKey.(type) { + case *rsa.PrivateKey: + return &rsaDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case *ecdsa.PrivateKey: + return &ecDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case []byte: + return &symmetricKeyCipher{ + key: decryptionKey, + }, nil + case string: + return &symmetricKeyCipher{ + key: []byte(decryptionKey), + }, nil + case JSONWebKey: + return newDecrypter(decryptionKey.Key) + case *JSONWebKey: + return newDecrypter(decryptionKey.Key) + } + if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { + return &opaqueKeyDecrypter{decrypter: okd}, nil + } + return nil, ErrUnsupportedKeyType +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { + return ctx.EncryptWithAuthData(plaintext, nil) +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { + obj := &JSONWebEncryption{} + obj.aad = aad + + obj.protected = &rawHeader{} + err := obj.protected.set(headerEncryption, ctx.contentAlg) + if err != nil { + return nil, err + } + + obj.recipients = make([]recipientInfo, len(ctx.recipients)) + + if len(ctx.recipients) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to") + } + + cek, headers, err := ctx.keyGenerator.genKey() + if err != nil { + return nil, err + } + + obj.protected.merge(&headers) + + for i, info := range ctx.recipients { + recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) + if err != nil { + return nil, err + } + + err = recipient.header.set(headerAlgorithm, info.keyAlg) + if err != nil { + return nil, err + } + + if info.keyID != "" { + err = recipient.header.set(headerKeyID, info.keyID) + if err != nil { + return nil, err + } + } + obj.recipients[i] = recipient + } + + if len(ctx.recipients) == 1 { + // Move per-recipient headers into main protected header if there's + // only a single recipient. 
+ obj.protected.merge(obj.recipients[0].header) + obj.recipients[0].header = nil + } + + if ctx.compressionAlg != NONE { + plaintext, err = compress(ctx.compressionAlg, plaintext) + if err != nil { + return nil, err + } + + err = obj.protected.set(headerCompression, ctx.compressionAlg) + if err != nil { + return nil, err + } + } + + for k, v := range ctx.extraHeaders { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + (*obj.protected)[k] = makeRawMessage(b) + } + + authData := obj.computeAuthData() + parts, err := ctx.cipher.encrypt(cek, authData, plaintext) + if err != nil { + return nil, err + } + + obj.iv = parts.iv + obj.ciphertext = parts.ciphertext + obj.tag = parts.tag + + return obj, nil +} + +func (ctx *genericEncrypter) Options() EncrypterOptions { + return EncrypterOptions{ + Compression: ctx.compressionAlg, + ExtraHeaders: ctx.extraHeaders, + } +} + +// Decrypt and validate the object and return the plaintext. Note that this +// function does not support multi-recipient, if you desire multi-recipient +// decryption use DecryptMulti instead. +func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { + headers := obj.mergedHeaders(nil) + + if len(obj.recipients) > 1 { + return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") + } + + critical, err := headers.getCritical() + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + decrypter, err := newDecrypter(decryptionKey) + if err != nil { + return nil, err + } + + cipher := getContentCipher(headers.getEncryption()) + if cipher == nil { + return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + var plaintext []byte + recipient := obj.recipients[0] + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + } + + if plaintext == nil { + return nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + } + + return plaintext, err +} + +// DecryptMulti decrypts and validates the object and returns the plaintexts, +// with support for multiple recipients. It returns the index of the recipient +// for which the decryption was successful, the merged headers for that recipient, +// and the plaintext. 
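Putting Encrypt and Decrypt together: the sketch below uses direct mode, where the supplied key is used as the CEK itself and must match the content cipher's key size (16 bytes for A128GCM). ParseEncrypted comes from elsewhere in this library (not part of this hunk); the plaintext is illustrative:

```go
package main

import (
	"crypto/rand"
	"fmt"

	jose "gopkg.in/go-jose/go-jose.v2"
)

func main() {
	// Direct mode: the shared key is the CEK, so it must be exactly 16 bytes for A128GCM.
	key := make([]byte, 16)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	enc, err := jose.NewEncrypter(jose.A128GCM,
		jose.Recipient{Algorithm: jose.DIRECT, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("attack at dawn"))
	if err != nil {
		panic(err)
	}
	serialized, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	// Receiving side: re-parse the compact JWE and decrypt with the same key.
	parsed, err := jose.ParseEncrypted(serialized)
	if err != nil {
		panic(err)
	}
	plaintext, err := parsed.Decrypt(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext)) // attack at dawn
}
```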
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { + globalHeaders := obj.mergedHeaders(nil) + + critical, err := globalHeaders.getCritical() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + decrypter, err := newDecrypter(decryptionKey) + if err != nil { + return -1, Header{}, nil, err + } + + encryption := globalHeaders.getEncryption() + cipher := getContentCipher(encryption) + if cipher == nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption)) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + index := -1 + var plaintext []byte + var headers rawHeader + + for i, recipient := range obj.recipients { + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + if err == nil { + index = i + headers = recipientHeaders + break + } + } + } + + if plaintext == nil || err != nil { + return -1, Header{}, nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + } + + sanitized, err := headers.sanitized() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err) + } + + return index, sanitized, plaintext, err +} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/doc.go b/vendor/gopkg.in/go-jose/go-jose.v2/doc.go new file mode 100644 index 00000000000..dd1387f3f06 --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/doc.go @@ -0,0 +1,27 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. It implements encryption and signing based on +the JSON Web Encryption and JSON Web Signature standards, with optional JSON +Web Token support available in a sub-package. The library supports both the +compact and full serialization formats, and has optional support for multiple +recipients. + +*/ +package jose diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go b/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go new file mode 100644 index 00000000000..40b688b3d2d --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go @@ -0,0 +1,185 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
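DecryptMulti is the multi-recipient counterpart of Decrypt: it tries the supplied key against each recipient and reports which slot matched. A sketch with two hypothetical RSA recipients, using the full (JSON) serialization since the compact form only carries a single recipient:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "gopkg.in/go-jose/go-jose.v2"
)

func main() {
	// Two placeholder recipients; either private key can decrypt independently.
	k1, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	k2, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	enc, err := jose.NewMultiEncrypter(jose.A128GCM, []jose.Recipient{
		{Algorithm: jose.RSA_OAEP, Key: &k1.PublicKey, KeyID: "key-1"},
		{Algorithm: jose.RSA_OAEP, Key: &k2.PublicKey, KeyID: "key-2"},
	}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("shared secret"))
	if err != nil {
		panic(err)
	}

	// Multi-recipient messages need the full JSON serialization.
	parsed, err := jose.ParseEncrypted(obj.FullSerialize())
	if err != nil {
		panic(err)
	}

	// DecryptMulti reports which recipient slot the supplied key matched.
	idx, hdr, plaintext, err := parsed.DecryptMulti(k2)
	if err != nil {
		panic(err)
	}
	fmt.Println(idx, hdr.KeyID, string(plaintext)) // 1 key-2 shared secret
}
```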
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "compress/flate" + "encoding/base64" + "encoding/binary" + "io" + "math/big" + "strings" + "unicode" + + "gopkg.in/go-jose/go-jose.v2/json" +) + +// Helper function to serialize known-good objects. +// Precondition: value is not a nil pointer. +func mustSerializeJSON(value interface{}) []byte { + out, err := json.Marshal(value) + if err != nil { + panic(err) + } + // We never want to serialize the top-level value "null," since it's not a + // valid JOSE message. But if a caller passes in a nil pointer to this method, + // MarshalJSON will happily serialize it as the top-level value "null". If + // that value is then embedded in another operation, for instance by being + // base64-encoded and fed as input to a signing algorithm + // (https://github.com/go-jose/go-jose/issues/22), the result will be + // incorrect. Because this method is intended for known-good objects, and a nil + // pointer is not a known-good object, we are free to panic in this case. + // Note: It's not possible to directly check whether the data pointed at by an + // interface is a nil pointer, so we do this hacky workaround. + // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I + if string(out) == "null" { + panic("Tried to serialize a nil pointer.") + } + return out +} + +// Strip all newlines and whitespace +func stripWhitespace(data string) string { + buf := strings.Builder{} + buf.Grow(len(data)) + for _, r := range data { + if !unicode.IsSpace(r) { + buf.WriteRune(r) + } + } + return buf.String() +} + +// Perform compression based on algorithm +func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return deflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Perform decompression based on algorithm +func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return inflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Compress with DEFLATE +func deflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + + // Writing to byte buffer, err is always nil + writer, _ := flate.NewWriter(output, 1) + _, _ = io.Copy(writer, bytes.NewBuffer(input)) + + err := writer.Close() + return output.Bytes(), err +} + +// Decompress with DEFLATE +func inflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + reader := flate.NewReader(bytes.NewBuffer(input)) + + _, err := io.Copy(output, reader) + if err != nil { + return nil, err + } + + err = reader.Close() + return output.Bytes(), err +} + +// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
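The deflate/inflate helpers above are unexported; purely for illustration, the sketch below mirrors what they do using only the standard library (raw DEFLATE at compression level 1). The sample payload is made up:

```go
package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
)

func main() {
	input := []byte(`{"msg":"hello hello hello hello hello"}`)

	// Compress: raw DEFLATE at level 1, matching the helper above.
	var compressed bytes.Buffer
	w, err := flate.NewWriter(&compressed, 1)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(w, bytes.NewReader(input)); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	compressedBytes := compressed.Bytes()

	// Decompress and verify the round trip.
	var out bytes.Buffer
	r := flate.NewReader(bytes.NewReader(compressedBytes))
	if _, err := io.Copy(&out, r); err != nil {
		panic(err)
	}
	_ = r.Close()

	fmt.Println(len(input), "->", len(compressedBytes), "bytes; round trip ok:",
		bytes.Equal(out.Bytes(), input))
}
```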
+type byteBuffer struct { + data []byte +} + +func newBuffer(data []byte) *byteBuffer { + if data == nil { + return nil + } + return &byteBuffer{ + data: data, + } +} + +func newFixedSizeBuffer(data []byte, length int) *byteBuffer { + if len(data) > length { + panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") + } + pad := make([]byte, length-len(data)) + return newBuffer(append(pad, data...)) +} + +func newBufferFromInt(num uint64) *byteBuffer { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, num) + return newBuffer(bytes.TrimLeft(data, "\x00")) +} + +func (b *byteBuffer) MarshalJSON() ([]byte, error) { + return json.Marshal(b.base64()) +} + +func (b *byteBuffer) UnmarshalJSON(data []byte) error { + var encoded string + err := json.Unmarshal(data, &encoded) + if err != nil { + return err + } + + if encoded == "" { + return nil + } + + decoded, err := base64.RawURLEncoding.DecodeString(encoded) + if err != nil { + return err + } + + *b = *newBuffer(decoded) + + return nil +} + +func (b *byteBuffer) base64() string { + return base64.RawURLEncoding.EncodeToString(b.data) +} + +func (b *byteBuffer) bytes() []byte { + // Handling nil here allows us to transparently handle nil slices when serializing. + if b == nil { + return nil + } + return b.data +} + +func (b byteBuffer) bigInt() *big.Int { + return new(big.Int).SetBytes(b.data) +} + +func (b byteBuffer) toInt() int { + return int(b.bigInt().Int64()) +} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE b/vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE new file mode 100644 index 00000000000..74487567632 --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
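byteBuffer serializes via base64.RawURLEncoding, i.e. the unpadded URL-safe alphabet that JOSE calls "base64url". A standard-library reference sketch of that encoding (the input bytes are arbitrary):

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	raw := []byte{0xfa, 0xce, 0x00, 0x01}

	// Unpadded, URL-safe base64: '-' and '_' instead of '+' and '/', no '=' padding.
	encoded := base64.RawURLEncoding.EncodeToString(raw)
	fmt.Println(encoded) // -s4AAQ

	decoded, err := base64.RawURLEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", decoded) // face0001
}
```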
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/README.md b/vendor/gopkg.in/go-jose/go-jose.v2/json/README.md new file mode 100644 index 00000000000..86de5e5581f --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/json/README.md @@ -0,0 +1,13 @@ +# Safe JSON + +This repository contains a fork of the `encoding/json` package from Go 1.6. + +The following changes were made: + +* Object deserialization uses case-sensitive member name matching instead of + [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). + This is to avoid differences in the interpretation of JOSE messages between + go-jose and libraries written in other languages. +* When deserializing a JSON object, we check for duplicate keys and reject the + input whenever we detect a duplicate. Rather than trying to work with malformed + data, we prefer to reject it right away. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go b/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go new file mode 100644 index 00000000000..4dbc4146cf9 --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go @@ -0,0 +1,1217 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. 
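The two behavioural differences called out in the README are observable through this fork's exported Unmarshal (import path taken from the vendor directory): duplicate object keys are rejected, and member-name matching has no case-insensitive fallback, even though the stdlib-derived doc comment above still mentions one. A sketch:

```go
package main

import (
	"fmt"

	josejson "gopkg.in/go-jose/go-jose.v2/json"
)

func main() {
	// Duplicate object keys are rejected instead of "last one wins".
	var m map[string]interface{}
	err := josejson.Unmarshal([]byte(`{"alg":"RS256","alg":"none"}`), &m)
	fmt.Println(err) // json: duplicate key 'alg' in object

	// Member-name matching is strictly case-sensitive.
	var s struct {
		Alg string `json:"alg"`
	}
	_ = josejson.Unmarshal([]byte(`{"ALG":"RS256"}`), &s)
	fmt.Printf("Alg=%q\n", s.Alg) // Alg="" — no case-insensitive fallback
}
```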
+// +// To unmarshal a JSON object into a string-keyed map, Unmarshal first +// establishes a map to use, If the map is nil, Unmarshal allocates a new map. +// Otherwise Unmarshal reuses the existing map, keeping existing entries. +// Unmarshal then stores key-value pairs from the JSON object into the map. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) 
+type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +type NumberUnmarshalType int + +const ( + // unmarshal a JSON number into an interface{} as a float64 + UnmarshalFloat NumberUnmarshalType = iota + // unmarshal a JSON number into an interface{} as a `json.Number` + UnmarshalJSONNumber + // unmarshal a JSON number into an interface{} as a int64 + // if value is an integer otherwise float64 + UnmarshalIntOrFloat +) + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + numberType NumberUnmarshalType +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. 
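Number keeps the literal digits of a JSON number, so the caller can defer the choice between Int64 and Float64; that matters for integers above 2^53, which a float64 cannot represent exactly. A small sketch against this fork's package:

```go
package main

import (
	"fmt"

	josejson "gopkg.in/go-jose/go-jose.v2/json"
)

func main() {
	// 2^53 + 1 is not exactly representable as a float64.
	n := josejson.Number("9007199254740993")

	i, err := n.Int64()
	fmt.Println(i, err) // 9007199254740993 <nil>

	f, _ := n.Float64()
	fmt.Println(f) // 9.007199254740992e+15 (rounded)
}
```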
+func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. 
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. 
+ u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, []byte(key)) { + f = ff + break + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). 
+func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64, int64 or a Number +// depending on d.numberDecodeType. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + switch d.numberType { + + case UnmarshalJSONNumber: + return Number(s), nil + case UnmarshalIntOrFloat: + v, err := strconv.ParseInt(s, 10, 64) + if err == nil { + return v, nil + } + + // tries to parse integer number in scientific notation + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + + // if it has no decimal value use int64 + if fi, fd := math.Modf(f); fd == 0.0 { + return int64(fi), nil + } + return f, nil + default: + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil + } + +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. + if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), 
int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. 
+func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. 
+ if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go b/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go new file mode 100644 index 00000000000..1dae8bb7cd8 --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go @@ -0,0 +1,1197 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. 
Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. +// The map's key type must be string; the map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON object. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON object. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. 
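A compact illustration of the field-tag rules described above, using this fork's Marshal; the payload type and values are made up:

```go
package main

import (
	"fmt"

	josejson "gopkg.in/go-jose/go-jose.v2/json"
)

// Hypothetical payload type showing the tag rules documented above.
type payload struct {
	Issuer  string `json:"iss"`
	Subject string `json:"sub,omitempty"` // dropped when empty
	Skip    string `json:"-"`             // never marshaled
	secret  string // unexported fields are never marshaled
}

func main() {
	b, err := josejson.Marshal(payload{Issuer: "https://issuer.example", Skip: "x", secret: "y"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"iss":"https://issuer.example"}
}
```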
+// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML